From 5ddbf16f35234dc1781de9d17310a345ac1524de Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Fri, 4 Dec 2020 16:39:58 -0800 Subject: [PATCH] feat: initial generation of enhanced types (#102) * feat: initial generation of enhanced types --- docs/conf.py | 6 +- docs/definition_v1beta1/types.rst | 6 + docs/instance_v1beta1/types.rst | 6 + docs/params_v1beta1/types.rst | 6 + docs/prediction_v1beta1/types.rst | 6 + .../schema/predict/instance/__init__.py | 56 ++ .../v1beta1/schema/predict/instance/py.typed | 2 + .../predict/instance_v1beta1/__init__.py | 39 ++ .../schema/predict/instance_v1beta1/py.typed | 2 + .../instance_v1beta1/types/__init__.py | 39 ++ .../types/image_classification.py | 51 ++ .../types/image_object_detection.py | 51 ++ .../types/image_segmentation.py | 45 ++ .../types/text_classification.py | 44 ++ .../instance_v1beta1/types/text_extraction.py | 55 ++ .../instance_v1beta1/types/text_sentiment.py | 44 ++ .../types/video_action_recognition.py | 64 +++ .../types/video_classification.py | 64 +++ .../types/video_object_tracking.py | 64 +++ .../v1beta1/schema/predict/params/__init__.py | 44 ++ .../v1beta1/schema/predict/params/py.typed | 2 + .../schema/predict/params_v1beta1/__init__.py | 33 ++ .../schema/predict/params_v1beta1/py.typed | 2 + .../predict/params_v1beta1/types/__init__.py | 33 ++ .../types/image_classification.py | 47 ++ .../types/image_object_detection.py | 48 ++ .../types/image_segmentation.py | 42 ++ .../types/video_action_recognition.py | 48 ++ .../types/video_classification.py | 85 +++ .../types/video_object_tracking.py | 54 ++ .../schema/predict/prediction/__init__.py | 64 +++ .../schema/predict/prediction/py.typed | 2 + .../predict/prediction_v1beta1/__init__.py | 43 ++ .../predict/prediction_v1beta1/py.typed | 2 + .../prediction_v1beta1/types/__init__.py | 43 ++ .../types/classification.py | 51 ++ .../types/image_object_detection.py | 64 +++ .../types/image_segmentation.py | 57 ++ .../types/tabular_classification.py | 47 ++ 
.../types/tabular_regression.py | 46 ++ .../types/text_extraction.py | 67 +++ .../types/text_sentiment.py | 68 +++ .../types/time_series_forecasting.py | 46 ++ .../types/video_action_recognition.py | 74 +++ .../types/video_classification.py | 90 ++++ .../types/video_object_tracking.py | 115 +++++ .../schema/trainingjob/definition/__init__.py | 132 +++++ .../schema/trainingjob/definition/py.typed | 2 + .../definition_v1beta1/__init__.py | 77 +++ .../trainingjob/definition_v1beta1/py.typed | 2 + .../definition_v1beta1/types/__init__.py | 99 ++++ .../types/automl_forecasting.py | 486 ++++++++++++++++++ .../types/automl_image_classification.py | 143 ++++++ .../types/automl_image_object_detection.py | 128 +++++ .../types/automl_image_segmentation.py | 121 +++++ .../definition_v1beta1/types/automl_tables.py | 447 ++++++++++++++++ .../types/automl_text_classification.py | 52 ++ .../types/automl_text_extraction.py | 43 ++ .../types/automl_text_sentiment.py | 59 +++ .../types/automl_video_action_recognition.py | 58 +++ .../types/automl_video_classification.py | 58 +++ .../types/automl_video_object_tracking.py | 62 +++ .../export_evaluated_data_items_config.py | 52 ++ google/cloud/aiplatform_v1beta1/__init__.py | 4 +- .../services/dataset_service/async_client.py | 20 +- .../dataset_service/transports/base.py | 20 +- .../services/endpoint_service/async_client.py | 14 +- .../endpoint_service/transports/base.py | 14 +- .../services/job_service/async_client.py | 40 +- .../services/job_service/transports/base.py | 40 +- .../services/model_service/async_client.py | 20 +- .../services/model_service/transports/base.py | 22 +- .../services/pipeline_service/async_client.py | 10 +- .../pipeline_service/transports/base.py | 10 +- .../prediction_service/async_client.py | 4 +- .../prediction_service/transports/base.py | 4 +- .../specialist_pool_service/async_client.py | 10 +- .../transports/base.py | 10 +- .../aiplatform_v1beta1/types/__init__.py | 412 +++++++-------- noxfile.py | 7 +- 
synth.metadata | 15 +- synth.py | 14 + 82 files changed, 4339 insertions(+), 339 deletions(-) create mode 100644 docs/definition_v1beta1/types.rst create mode 100644 docs/instance_v1beta1/types.rst create mode 100644 docs/params_v1beta1/types.rst create mode 100644 docs/prediction_v1beta1/types.rst create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed create mode 100644 
google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py create mode 100644 
google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py create mode 100644 
google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py diff --git a/docs/conf.py b/docs/conf.py index effa4a8f1f..98e68be241 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -345,10 +345,10 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "python": ("https://python.readthedocs.org/en/latest/", None), + "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), - "grpc": ("https://grpc.io/grpc/python/", None), + "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), } diff --git a/docs/definition_v1beta1/types.rst b/docs/definition_v1beta1/types.rst new file mode 100644 index 0000000000..3f351d03fc --- /dev/null +++ b/docs/definition_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API +=================================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types + :members: + :show-inheritance: diff --git a/docs/instance_v1beta1/types.rst b/docs/instance_v1beta1/types.rst new file mode 100644 index 0000000000..c52ae4800c --- /dev/null +++ b/docs/instance_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +============================================================================= + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types + :members: + :show-inheritance: diff --git a/docs/params_v1beta1/types.rst b/docs/params_v1beta1/types.rst new file mode 100644 index 0000000000..ce7a29cb01 --- /dev/null +++ b/docs/params_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +=========================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types + :members: + :show-inheritance: diff --git a/docs/prediction_v1beta1/types.rst b/docs/prediction_v1beta1/types.rst new file mode 100644 index 0000000000..cdbe7f2842 --- /dev/null +++ b/docs/prediction_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +=============================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types + :members: + :show-inheritance: diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..2f514ac4ed --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ( + ImageClassificationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import ( + TextClassificationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import ( + TextExtractionPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import ( + TextSentimentPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import ( + VideoClassificationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + "ImageClassificationPredictionInstance", + "ImageObjectDetectionPredictionInstance", + "ImageSegmentationPredictionInstance", + "TextClassificationPredictionInstance", + "TextExtractionPredictionInstance", + "TextSentimentPredictionInstance", + "VideoActionRecognitionPredictionInstance", + "VideoClassificationPredictionInstance", + "VideoObjectTrackingPredictionInstance", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ 
b/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..f6d9a128ad --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + + +__all__ = ( + "ImageObjectDetectionPredictionInstance", + "ImageSegmentationPredictionInstance", + "TextClassificationPredictionInstance", + "TextExtractionPredictionInstance", + "TextSentimentPredictionInstance", + "VideoActionRecognitionPredictionInstance", + "VideoClassificationPredictionInstance", + "VideoObjectTrackingPredictionInstance", + "ImageClassificationPredictionInstance", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py new file mode 100644 index 0000000000..3160c08e1d --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .image_classification import ImageClassificationPredictionInstance +from .image_object_detection import ImageObjectDetectionPredictionInstance +from .image_segmentation import ImageSegmentationPredictionInstance +from .text_classification import TextClassificationPredictionInstance +from .text_extraction import TextExtractionPredictionInstance +from .text_sentiment import TextSentimentPredictionInstance +from .video_action_recognition import VideoActionRecognitionPredictionInstance +from .video_classification import VideoClassificationPredictionInstance +from .video_object_tracking import VideoObjectTrackingPredictionInstance + + +__all__ = ( + "ImageClassificationPredictionInstance", + "ImageObjectDetectionPredictionInstance", + "ImageSegmentationPredictionInstance", + "TextClassificationPredictionInstance", + "TextExtractionPredictionInstance", + "TextSentimentPredictionInstance", + "VideoActionRecognitionPredictionInstance", + "VideoClassificationPredictionInstance", + "VideoObjectTrackingPredictionInstance", +) 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..84b1ef0bbe --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"ImageClassificationPredictionInstance",}, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..79c3efc2c6 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"ImageObjectDetectionPredictionInstance",}, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..5a3232c6d2 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"ImageSegmentationPredictionInstance",}, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/png + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py new file mode 100644 index 0000000000..a615dc7e49 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"TextClassificationPredictionInstance",}, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. 
+ - text/plain + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..c6fecf80b7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"TextExtractionPredictionInstance",}, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will by mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. 
AI Platform will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + key = proto.Field(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..69836d0e96 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"TextSentimentPredictionInstance",}, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. 
+ - text/plain + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..89be6318f8 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"VideoActionRecognitionPredictionInstance",}, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. 
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.instance",
    manifest={"VideoClassificationPredictionInstance",},
)


class VideoClassificationPredictionInstance(proto.Message):
    r"""A single input instance for a Video Classification prediction
    request.

    Attributes:
        content (str):
            Google Cloud Storage URI of the video to
            classify.
        mime_type (str):
            The MIME type of the video content. Supported
            values: video/mp4, video/avi, video/quicktime.
        time_segment_start (str):
            Start (inclusive) of the time segment to classify, measured
            in seconds from the beginning of the video and suffixed with
            "s". Fractional values up to microsecond precision are
            accepted.
        time_segment_end (str):
            End (exclusive) of the time segment to classify, in the
            same "<seconds>s" format. Fractions up to microsecond
            precision are accepted, and "Infinity" denotes the end of
            the video.
    """

    content = proto.Field(proto.STRING, number=1)

    mime_type = proto.Field(proto.STRING, number=2)

    time_segment_start = proto.Field(proto.STRING, number=3)

    time_segment_end = proto.Field(proto.STRING, number=4)


__all__ = tuple(sorted(__protobuf__.manifest))
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.instance",
    manifest={"VideoObjectTrackingPredictionInstance",},
)


class VideoObjectTrackingPredictionInstance(proto.Message):
    r"""A single input instance for a Video Object Tracking prediction
    request.

    Attributes:
        content (str):
            Google Cloud Storage URI of the video on
            which objects are tracked.
        mime_type (str):
            The MIME type of the video content. Supported
            values: video/mp4, video/avi, video/quicktime.
        time_segment_start (str):
            Start (inclusive) of the time segment to process, measured
            in seconds from the beginning of the video and suffixed with
            "s". Fractional values up to microsecond precision are
            accepted.
        time_segment_end (str):
            End (exclusive) of the time segment to process, in the same
            "<seconds>s" format. Fractions up to microsecond precision
            are accepted, and "Infinity" denotes the end of the video.
    """

    content = proto.Field(proto.STRING, number=1)

    mime_type = proto.Field(proto.STRING, number=2)

    time_segment_start = proto.Field(proto.STRING, number=3)

    time_segment_end = proto.Field(proto.STRING, number=4)


__all__ = tuple(sorted(__protobuf__.manifest))
+# + +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ( + ImageClassificationPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ( + ImageSegmentationPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import ( + VideoClassificationPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + "ImageClassificationPredictionParams", + "ImageObjectDetectionPredictionParams", + "ImageSegmentationPredictionParams", + "VideoActionRecognitionPredictionParams", + "VideoClassificationPredictionParams", + "VideoObjectTrackingPredictionParams", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. 
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

"""Versioned module for the prediction-params schema types.

Re-exports every ``*PredictionParams`` message so callers can import them
directly from ``...schema.predict.params_v1beta1``.
"""

from .types.image_classification import ImageClassificationPredictionParams
from .types.image_object_detection import ImageObjectDetectionPredictionParams
from .types.image_segmentation import ImageSegmentationPredictionParams
from .types.video_action_recognition import VideoActionRecognitionPredictionParams
from .types.video_classification import VideoClassificationPredictionParams
from .types.video_object_tracking import VideoObjectTrackingPredictionParams


# Kept in alphabetical order for consistency with types/__init__.py and the
# un-versioned params package __init__.py (the original listed
# ImageClassificationPredictionParams last).
__all__ = (
    "ImageClassificationPredictionParams",
    "ImageObjectDetectionPredictionParams",
    "ImageSegmentationPredictionParams",
    "VideoActionRecognitionPredictionParams",
    "VideoClassificationPredictionParams",
    "VideoObjectTrackingPredictionParams",
)
file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py new file mode 100644 index 0000000000..39202720fa --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .image_classification import ImageClassificationPredictionParams +from .image_object_detection import ImageObjectDetectionPredictionParams +from .image_segmentation import ImageSegmentationPredictionParams +from .video_action_recognition import VideoActionRecognitionPredictionParams +from .video_classification import VideoClassificationPredictionParams +from .video_object_tracking import VideoObjectTrackingPredictionParams + + +__all__ = ( + "ImageClassificationPredictionParams", + "ImageObjectDetectionPredictionParams", + "ImageSegmentationPredictionParams", + "VideoActionRecognitionPredictionParams", + "VideoClassificationPredictionParams", + "VideoObjectTrackingPredictionParams", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..681a8c3d87 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.params",
    manifest={"ImageClassificationPredictionParams",},
)


class ImageClassificationPredictionParams(proto.Message):
    r"""Model parameters governing an Image Classification prediction.

    Attributes:
        confidence_threshold (float):
            Minimum confidence score a prediction must
            have to be returned. Defaults to 0.0.
        max_predictions (int):
            Maximum number of predictions (highest
            confidence first) returned per instance; the
            model may return fewer. Defaults to 10.
    """

    confidence_threshold = proto.Field(proto.FLOAT, number=1)

    max_predictions = proto.Field(proto.INT32, number=2)


__all__ = tuple(sorted(__protobuf__.manifest))
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.params",
    manifest={"ImageObjectDetectionPredictionParams",},
)


class ImageObjectDetectionPredictionParams(proto.Message):
    r"""Model parameters governing an Image Object Detection prediction.

    Attributes:
        confidence_threshold (float):
            Minimum confidence score a prediction must
            have to be returned. Defaults to 0.0.
        max_predictions (int):
            Maximum number of predictions (highest
            confidence first) returned per instance; also
            capped by the metadata's predictionsLimit.
            Defaults to 10.
    """

    confidence_threshold = proto.Field(proto.FLOAT, number=1)

    max_predictions = proto.Field(proto.INT32, number=2)


__all__ = tuple(sorted(__protobuf__.manifest))
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.params",
    manifest={"ImageSegmentationPredictionParams",},
)


class ImageSegmentationPredictionParams(proto.Message):
    r"""Model parameters governing an Image Segmentation prediction.

    Attributes:
        confidence_threshold (float):
            Per-pixel confidence cutoff: pixels the model
            is less confident about than this value are
            classified as background instead of a category.
            Defaults to 0.5.
    """

    confidence_threshold = proto.Field(proto.FLOAT, number=1)


__all__ = tuple(sorted(__protobuf__.manifest))
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.params",
    manifest={"VideoActionRecognitionPredictionParams",},
)


class VideoActionRecognitionPredictionParams(proto.Message):
    r"""Model parameters governing a Video Action Recognition
    prediction.

    Attributes:
        confidence_threshold (float):
            Minimum confidence score a prediction must
            have to be returned. Defaults to 0.0.
        max_predictions (int):
            Maximum number of predictions (highest
            confidence first) returned per video frame; the
            model may return fewer. Defaults to 50.
    """

    confidence_threshold = proto.Field(proto.FLOAT, number=1)

    max_predictions = proto.Field(proto.INT32, number=2)


__all__ = tuple(sorted(__protobuf__.manifest))
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.params",
    manifest={"VideoClassificationPredictionParams",},
)


class VideoClassificationPredictionParams(proto.Message):
    r"""Model parameters governing a Video Classification prediction.

    Attributes:
        confidence_threshold (float):
            Minimum confidence score a prediction must
            have to be returned. Defaults to 0.0.
        max_predictions (int):
            Maximum number of predictions (highest
            confidence first) returned per instance; the
            model may return fewer. Defaults to 10,000.
        segment_classification (bool):
            When true, request segment-level
            classification: labels and confidence scores for
            the entire user-specified time segment of the
            video. Defaults to true.
        shot_classification (bool):
            When true, request shot-level classification:
            AI Platform detects camera-shot boundaries
            within the requested time segment and returns
            labels, confidence scores, and start/end times
            for each detected shot.
            WARNING: no model evaluation is performed for
            this classification type; its quality depends on
            the training data and no quality metrics are
            provided. Defaults to false.
        one_sec_interval_classification (bool):
            When true, request classification at
            one-second intervals: labels and confidence
            scores for each second of the requested time
            segment.
            WARNING: no model evaluation is performed for
            this classification type; its quality depends on
            the training data and no quality metrics are
            provided. Defaults to false.
    """

    confidence_threshold = proto.Field(proto.FLOAT, number=1)

    max_predictions = proto.Field(proto.INT32, number=2)

    segment_classification = proto.Field(proto.BOOL, number=3)

    shot_classification = proto.Field(proto.BOOL, number=4)

    one_sec_interval_classification = proto.Field(proto.BOOL, number=5)


__all__ = tuple(sorted(__protobuf__.manifest))
If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + max_predictions = proto.Field(proto.INT32, number=2) + + min_bounding_box_size = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..4447d3770a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ( + ClassificationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ( + ImageSegmentationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import ( + TabularClassificationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import ( + TabularRegressionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import ( + TextExtractionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import ( + TextSentimentPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import ( + VideoClassificationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + "ClassificationPredictionResult", + "ImageObjectDetectionPredictionResult", + "ImageSegmentationPredictionResult", + "TabularClassificationPredictionResult", + "TabularRegressionPredictionResult", + "TextExtractionPredictionResult", + "TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", + 
"VideoActionRecognitionPredictionResult", + "VideoClassificationPredictionResult", + "VideoObjectTrackingPredictionResult", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..37066cd8b3 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License").

"""Versioned module for the prediction-result schema types.

Re-exports every ``*PredictionResult`` message so callers can import them
directly from ``...schema.predict.prediction_v1beta1``.
"""

from .types.classification import ClassificationPredictionResult
from .types.image_object_detection import ImageObjectDetectionPredictionResult
from .types.image_segmentation import ImageSegmentationPredictionResult
from .types.tabular_classification import TabularClassificationPredictionResult
from .types.tabular_regression import TabularRegressionPredictionResult
from .types.text_extraction import TextExtractionPredictionResult
from .types.text_sentiment import TextSentimentPredictionResult
from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult
from .types.video_action_recognition import VideoActionRecognitionPredictionResult
from .types.video_classification import VideoClassificationPredictionResult
from .types.video_object_tracking import VideoObjectTrackingPredictionResult


# Kept in alphabetical order for consistency with types/__init__.py and the
# un-versioned prediction package __init__.py (the original listed
# ClassificationPredictionResult last).
__all__ = (
    "ClassificationPredictionResult",
    "ImageObjectDetectionPredictionResult",
    "ImageSegmentationPredictionResult",
    "TabularClassificationPredictionResult",
    "TabularRegressionPredictionResult",
    "TextExtractionPredictionResult",
    "TextSentimentPredictionResult",
    "TimeSeriesForecastingPredictionResult",
    "VideoActionRecognitionPredictionResult",
    "VideoClassificationPredictionResult",
    "VideoObjectTrackingPredictionResult",
)
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py new file mode 100644 index 0000000000..2d6c8a98d3 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .classification import ClassificationPredictionResult +from .image_object_detection import ImageObjectDetectionPredictionResult +from .image_segmentation import ImageSegmentationPredictionResult +from .tabular_classification import TabularClassificationPredictionResult +from .tabular_regression import TabularRegressionPredictionResult +from .text_extraction import TextExtractionPredictionResult +from .text_sentiment import TextSentimentPredictionResult +from .time_series_forecasting import TimeSeriesForecastingPredictionResult +from .video_action_recognition import VideoActionRecognitionPredictionResult +from .video_classification import VideoClassificationPredictionResult +from .video_object_tracking import VideoObjectTrackingPredictionResult + + +__all__ = ( + "ClassificationPredictionResult", + "ImageObjectDetectionPredictionResult", + "ImageSegmentationPredictionResult", + "TabularClassificationPredictionResult", + "TabularRegressionPredictionResult", + "TextExtractionPredictionResult", + 
"TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", + "VideoActionRecognitionPredictionResult", + "VideoClassificationPredictionResult", + "VideoObjectTrackingPredictionResult", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py new file mode 100644 index 0000000000..3bfe82f64e --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"ClassificationPredictionResult",}, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField(proto.INT64, number=1) + + display_names = proto.RepeatedField(proto.STRING, number=2) + + confidences = proto.RepeatedField(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..1bf5002c2a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"ImageObjectDetectionPredictionResult",}, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. 
+ confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[~.struct.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. + """ + + ids = proto.RepeatedField(proto.INT64, number=1) + + display_names = proto.RepeatedField(proto.STRING, number=2) + + confidences = proto.RepeatedField(proto.FLOAT, number=3) + + bboxes = proto.RepeatedField(proto.MESSAGE, number=4, message=struct.ListValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..195dea6f79 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"ImageSegmentationPredictionResult",}, +) + + +class ImageSegmentationPredictionResult(proto.Message): + r"""Prediction output format for Image Segmentation. + + Attributes: + category_mask (bytes): + A PNG image where each pixel in the mask + represents the category in which the pixel in + the original image was predicted to belong to. + The size of this image will be the same as the + original image. The mapping between the + AnnotationSpec and the color can be found in + model's metadata. The model will choose the most + likely category and if none of the categories + reach the confidence threshold, the pixel will + be marked as background. + confidence_mask (bytes): + A one channel image which is encoded as an + 8bit lossless PNG. The size of the image will be + the same as the original image. For a specific + pixel, darker color means less confidence in + correctness of the category in the categoryMask + for the corresponding pixel. Black means no + confidence and white means complete confidence. + """ + + category_mask = proto.Field(proto.BYTES, number=1) + + confidence_mask = proto.Field(proto.BYTES, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py new file mode 100644 index 0000000000..4906ad59a5 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TabularClassificationPredictionResult",}, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. + """ + + classes = proto.RepeatedField(proto.STRING, number=1) + + scores = proto.RepeatedField(proto.FLOAT, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py new file mode 100644 index 0000000000..71d535c1f0 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TabularRegressionPredictionResult",}, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. + """ + + value = proto.Field(proto.FLOAT, number=1) + + lower_bound = proto.Field(proto.FLOAT, number=2) + + upper_bound = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..e3c10b5d75 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TextExtractionPredictionResult",}, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField(proto.INT64, number=1) + + display_names = proto.RepeatedField(proto.STRING, number=2) + + text_segment_start_offsets = proto.RepeatedField(proto.INT64, number=3) + + text_segment_end_offsets = proto.RepeatedField(proto.INT64, number=4) + + confidences = proto.RepeatedField(proto.FLOAT, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..192e50419d --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform.v1beta1.schema.predict.instance import text_sentiment_pb2 as gcaspi_text_sentiment # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TextSentimentPredictionResult",}, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Represents a line of JSONL in the text sentiment batch + prediction output file. This is a hack to allow printing of + integer values. + + Attributes: + instance (~.gcaspi_text_sentiment.TextSentimentPredictionInstance): + User's input instance. 
+ prediction (~.gcaspp_text_sentiment.TextSentimentPredictionResult.Prediction): + The prediction result. + """ + + class Prediction(proto.Message): + r"""Prediction output format for Text Sentiment. + + Attributes: + sentiment (int): + The integer sentiment labels between 0 + (inclusive) and sentimentMax label (inclusive), + while 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field(proto.INT32, number=1) + + instance = proto.Field( + proto.MESSAGE, + number=1, + message=gcaspi_text_sentiment.TextSentimentPredictionInstance, + ) + + prediction = proto.Field(proto.MESSAGE, number=2, message=Prediction,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py new file mode 100644 index 0000000000..38bd8e3c85 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TimeSeriesForecastingPredictionResult",}, +) + + +class TimeSeriesForecastingPredictionResult(proto.Message): + r"""Prediction output format for Time Series Forecasting. + + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. + """ + + value = proto.Field(proto.FLOAT, number=1) + + lower_bound = proto.Field(proto.FLOAT, number=2) + + upper_bound = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..f76b51899b --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"VideoActionRecognitionPredictionResult",}, +) + + +class VideoActionRecognitionPredictionResult(proto.Message): + r"""Prediction output format for Video Action Recognition. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (~.duration.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (~.duration.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (~.wrappers.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field( + proto.MESSAGE, number=4, message=duration.Duration, + ) + + time_segment_end = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,) + + confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers.FloatValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..469023b122 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"VideoClassificationPredictionResult",}, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. 
+ display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (~.duration.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (~.duration.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. + confidence (~.wrappers.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + type_ = proto.Field(proto.STRING, number=3) + + time_segment_start = proto.Field( + proto.MESSAGE, number=4, message=duration.Duration, + ) + + time_segment_end = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,) + + confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers.FloatValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..026f80a325 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"VideoObjectTrackingPredictionResult",}, +) + + +class VideoObjectTrackingPredictionResult(proto.Message): + r"""Prediction output format for Video Object Tracking. 
+ + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (~.duration.Duration): + The beginning, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (~.duration.Duration): + The end, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (~.wrappers.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. + frames (Sequence[~.video_object_tracking.VideoObjectTrackingPredictionResult.Frame]): + All of the frames of the video in which a + single object instance has been detected. The + bounding boxes in the frames identify the same + object. + """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (~.duration.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (~.wrappers.FloatValue): + The leftmost coordinate of the bounding box. + x_max (~.wrappers.FloatValue): + The rightmost coordinate of the bounding box. 
+ y_min (~.wrappers.FloatValue): + The topmost coordinate of the bounding box. + y_max (~.wrappers.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + x_min = proto.Field(proto.MESSAGE, number=2, message=wrappers.FloatValue,) + + x_max = proto.Field(proto.MESSAGE, number=3, message=wrappers.FloatValue,) + + y_min = proto.Field(proto.MESSAGE, number=4, message=wrappers.FloatValue,) + + y_max = proto.Field(proto.MESSAGE, number=5, message=wrappers.FloatValue,) + + id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field( + proto.MESSAGE, number=3, message=duration.Duration, + ) + + time_segment_end = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + confidence = proto.Field(proto.MESSAGE, number=5, message=wrappers.FloatValue,) + + frames = proto.RepeatedField(proto.MESSAGE, number=6, message=Frame,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py new file mode 100644 index 0000000000..abd693172a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( + AutoMlForecasting, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( + AutoMlForecastingInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( + AutoMlForecastingMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import ( + AutoMlImageClassification, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import ( + AutoMlImageClassificationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import ( + AutoMlImageClassificationMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import ( + AutoMlImageObjectDetection, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import ( + AutoMlImageObjectDetectionInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import ( + AutoMlImageObjectDetectionMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import ( + AutoMlImageSegmentation, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import ( + AutoMlImageSegmentationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import ( + AutoMlImageSegmentationMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import ( + AutoMlTables, +) +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import ( + AutoMlTablesInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import ( + AutoMlTablesMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import ( + AutoMlTextClassification, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import ( + AutoMlTextClassificationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import ( + AutoMlTextExtraction, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import ( + AutoMlTextExtractionInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import ( + AutoMlTextSentiment, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import ( + AutoMlTextSentimentInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import ( + AutoMlVideoActionRecognition, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import ( + AutoMlVideoActionRecognitionInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import ( + AutoMlVideoClassification, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import ( + AutoMlVideoClassificationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import ( + AutoMlVideoObjectTracking, +) +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import ( + AutoMlVideoObjectTrackingInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) + +__all__ = ( + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + "AutoMlTables", + "AutoMlTablesInputs", + "AutoMlTablesMetadata", + "AutoMlTextClassification", + "AutoMlTextClassificationInputs", + "AutoMlTextExtraction", + "AutoMlTextExtractionInputs", + "AutoMlTextSentiment", + "AutoMlTextSentimentInputs", + "AutoMlVideoActionRecognition", + "AutoMlVideoActionRecognitionInputs", + "AutoMlVideoClassification", + "AutoMlVideoClassificationInputs", + "AutoMlVideoObjectTracking", + "AutoMlVideoObjectTrackingInputs", + "ExportEvaluatedDataItemsConfig", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py new file mode 100644 index 0000000000..346ea62686 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .types.automl_forecasting import AutoMlForecasting +from .types.automl_forecasting import AutoMlForecastingInputs +from .types.automl_forecasting import AutoMlForecastingMetadata +from .types.automl_image_classification import AutoMlImageClassification +from .types.automl_image_classification import AutoMlImageClassificationInputs +from .types.automl_image_classification import AutoMlImageClassificationMetadata +from .types.automl_image_object_detection import AutoMlImageObjectDetection +from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from .types.automl_image_segmentation import AutoMlImageSegmentation +from .types.automl_image_segmentation import AutoMlImageSegmentationInputs +from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from .types.automl_tables import AutoMlTables +from .types.automl_tables import AutoMlTablesInputs +from .types.automl_tables import 
AutoMlTablesMetadata +from .types.automl_text_classification import AutoMlTextClassification +from .types.automl_text_classification import AutoMlTextClassificationInputs +from .types.automl_text_extraction import AutoMlTextExtraction +from .types.automl_text_extraction import AutoMlTextExtractionInputs +from .types.automl_text_sentiment import AutoMlTextSentiment +from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_video_action_recognition import AutoMlVideoActionRecognition +from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from .types.automl_video_classification import AutoMlVideoClassification +from .types.automl_video_classification import AutoMlVideoClassificationInputs +from .types.automl_video_object_tracking import AutoMlVideoObjectTracking +from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + + +__all__ = ( + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + "AutoMlTables", + "AutoMlTablesInputs", + "AutoMlTablesMetadata", + "AutoMlTextClassification", + "AutoMlTextClassificationInputs", + "AutoMlTextExtraction", + "AutoMlTextExtractionInputs", + "AutoMlTextSentiment", + "AutoMlTextSentimentInputs", + "AutoMlVideoActionRecognition", + "AutoMlVideoActionRecognitionInputs", + "AutoMlVideoClassification", + "AutoMlVideoClassificationInputs", + "AutoMlVideoObjectTracking", + "AutoMlVideoObjectTrackingInputs", + "ExportEvaluatedDataItemsConfig", +) diff --git 
a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py new file mode 100644 index 0000000000..6a0e7903b2 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig +from .automl_forecasting import ( + AutoMlForecasting, + AutoMlForecastingInputs, + AutoMlForecastingMetadata, +) +from .automl_image_classification import ( + AutoMlImageClassification, + AutoMlImageClassificationInputs, + AutoMlImageClassificationMetadata, +) +from .automl_image_object_detection import ( + AutoMlImageObjectDetection, + AutoMlImageObjectDetectionInputs, + AutoMlImageObjectDetectionMetadata, +) +from .automl_image_segmentation import ( + AutoMlImageSegmentation, + AutoMlImageSegmentationInputs, + AutoMlImageSegmentationMetadata, +) +from .automl_tables import ( + AutoMlTables, + AutoMlTablesInputs, + AutoMlTablesMetadata, +) +from .automl_text_classification import ( + AutoMlTextClassification, + AutoMlTextClassificationInputs, +) +from .automl_text_extraction import ( + AutoMlTextExtraction, + AutoMlTextExtractionInputs, +) +from .automl_text_sentiment import ( + AutoMlTextSentiment, + AutoMlTextSentimentInputs, +) +from .automl_video_action_recognition import ( + AutoMlVideoActionRecognition, + AutoMlVideoActionRecognitionInputs, +) +from .automl_video_classification import ( + AutoMlVideoClassification, + AutoMlVideoClassificationInputs, +) +from .automl_video_object_tracking import ( + AutoMlVideoObjectTracking, + AutoMlVideoObjectTrackingInputs, +) + + +__all__ = ( + "ExportEvaluatedDataItemsConfig", + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + "AutoMlTables", + "AutoMlTablesInputs", + "AutoMlTablesMetadata", + "AutoMlTextClassification", + "AutoMlTextClassificationInputs", + "AutoMlTextExtraction", + 
"AutoMlTextExtractionInputs", + "AutoMlTextSentiment", + "AutoMlTextSentimentInputs", + "AutoMlVideoActionRecognition", + "AutoMlVideoActionRecognitionInputs", + "AutoMlVideoClassification", + "AutoMlVideoClassificationInputs", + "AutoMlVideoObjectTracking", + "AutoMlVideoObjectTrackingInputs", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py new file mode 100644 index 0000000000..40c549dc5f --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import ( + export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config, +) + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + }, +) + + +class AutoMlForecasting(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Forecasting + Model. + + Attributes: + inputs (~.automl_forecasting.AutoMlForecastingInputs): + The input parameters of this TrainingJob. 
+ metadata (~.automl_forecasting.AutoMlForecastingMetadata): + The metadata information. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlForecastingInputs",) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlForecastingMetadata", + ) + + +class AutoMlForecastingInputs(proto.Message): + r""" + + Attributes: + target_column (str): + The name of the column that the model is to + predict. + time_series_identifier_column (str): + The name of the column that identifies the + time series. + time_column (str): + The name of the column that identifies time + order in the time series. + transformations (Sequence[~.automl_forecasting.AutoMlForecastingInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that optimizes the value of the objective + function over the validation set. + + The supported optimization objectives: + "minimize-rmse" (default) - Minimize root- + mean-squared error (RMSE). "minimize-mae" - + Minimize mean-absolute error (MAE). "minimize- + rmsle" - Minimize root-mean-squared log error + (RMSLE). "minimize-rmspe" - Minimize root- + mean-squared percentage error (RMSPE). + "minimize-wape-mae" - Minimize the combination + of weighted absolute percentage error (WAPE) + and mean-absolute-error (MAE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. 
This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + weight_column (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + static_columns (Sequence[str]): + Column names that should be used as static + columns. The value of these columns are static + per time series. + time_variant_past_only_columns (Sequence[str]): + Column names that should be used as time variant past only + columns. This column contains information for the given + entity (identified by the time_series_identifier_column) + that is known for the past but not the future (e.g. + population of a city in a given year, or weather on a given + day). + time_variant_past_and_future_columns (Sequence[str]): + Column names that should be used as time + variant past and future columns. This column + contains information for the given entity + (identified by the key column) that is known for + the past and the future + period (~.automl_forecasting.AutoMlForecastingInputs.Period): + Expected difference in time granularity + between rows in the data. If it is not set, the + period is inferred from data. + forecast_window_start (int): + The number of periods offset into the future as the start of + the forecast window (the window of future values to predict, + relative to the present.), where each period is one unit of + granularity as defined by the ``period`` field above. + Default to 0. Inclusive. 
+ forecast_window_end (int): + The number of periods offset into the future as the end of + the forecast window (the window of future values to predict, + relative to the present.), where each period is one unit of + granularity as defined by the ``period`` field above. + Inclusive. + past_horizon (int): + The number of periods offset into the past to restrict past + sequence, where each period is one unit of granularity as + defined by the ``period``. Default value 0 means that it + lets algorithm to define the value. Inclusive. + export_evaluated_data_items_config (~.gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + """ + + class Transformation(proto.Message): + r""" + + Attributes: + auto (~.automl_forecasting.AutoMlForecastingInputs.Transformation.AutoTransformation): + + numeric (~.automl_forecasting.AutoMlForecastingInputs.Transformation.NumericTransformation): + + categorical (~.automl_forecasting.AutoMlForecastingInputs.Transformation.CategoricalTransformation): + + timestamp (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TimestampTransformation): + + text (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TextTransformation): + + repeated_numeric (~.automl_forecasting.AutoMlForecastingInputs.Transformation.NumericArrayTransformation): + + repeated_categorical (~.automl_forecasting.AutoMlForecastingInputs.Transformation.CategoricalArrayTransformation): + + repeated_text (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TextArrayTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + invalid_values_allowed = proto.Field(proto.BOOL, number=2) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. 
Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = + ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + time_format = proto.Field(proto.STRING, number=2) + + invalid_values_allowed = proto.Field(proto.BOOL, number=3) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting + embedding. + - Stop-words receive no special treatment and are not removed. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class NumericArrayTransformation(proto.Message): + r"""Treats the column as numerical array and performs following + transformation functions. + + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + invalid_values_allowed = proto.Field(proto.BOOL, number=2) + + class CategoricalArrayTransformation(proto.Message): + r"""Treats the column as categorical array and performs following + transformation functions. + + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class TextArrayTransformation(proto.Message): + r"""Treats the column as text array and performs following + transformation functions. + + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as + a single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.AutoTransformation", + ) + + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.NumericTransformation", + ) + + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.CategoricalTransformation", + ) + + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TimestampTransformation", + ) + + text = proto.Field( + proto.MESSAGE, + number=5, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TextTransformation", + ) + + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.NumericArrayTransformation", + ) + + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.CategoricalArrayTransformation", + ) + + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TextArrayTransformation", + ) + + class Period(proto.Message): + r"""A duration of time expressed in time granularity units. + + Attributes: + unit (str): + The time granularity unit of this time + period. The supported unit are: + "hour" + "day" + "week" + "month" + "year". + quantity (int): + The number of units per period, e.g. 3 weeks + or 2 months. 
+ """ + + unit = proto.Field(proto.STRING, number=1) + + quantity = proto.Field(proto.INT64, number=2) + + target_column = proto.Field(proto.STRING, number=1) + + time_series_identifier_column = proto.Field(proto.STRING, number=2) + + time_column = proto.Field(proto.STRING, number=3) + + transformations = proto.RepeatedField( + proto.MESSAGE, number=4, message=Transformation, + ) + + optimization_objective = proto.Field(proto.STRING, number=5) + + train_budget_milli_node_hours = proto.Field(proto.INT64, number=6) + + weight_column = proto.Field(proto.STRING, number=7) + + static_columns = proto.RepeatedField(proto.STRING, number=8) + + time_variant_past_only_columns = proto.RepeatedField(proto.STRING, number=9) + + time_variant_past_and_future_columns = proto.RepeatedField(proto.STRING, number=10) + + period = proto.Field(proto.MESSAGE, number=11, message=Period,) + + forecast_window_start = proto.Field(proto.INT64, number=12) + + forecast_window_end = proto.Field(proto.INT64, number=13) + + past_horizon = proto.Field(proto.INT64, number=14) + + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=15, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + + +class AutoMlForecastingMetadata(proto.Message): + r"""Model metadata specific to AutoML Forecasting. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. 
+ """ + + train_cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py new file mode 100644 index 0000000000..0ee0394192 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + }, +) + + +class AutoMlImageClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Classification Model. + + Attributes: + inputs (~.automl_image_classification.AutoMlImageClassificationInputs): + The input parameters of this TrainingJob. + metadata (~.automl_image_classification.AutoMlImageClassificationMetadata): + The metadata information. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlImageClassificationInputs", + ) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlImageClassificationMetadata", + ) + + +class AutoMlImageClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_image_classification.AutoMlImageClassificationInputs.ModelType): + + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 8,000 and + 800,000 milli node hours, inclusive. The default value is + 192,000 which represents one day in wall time, considering 8 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, + the training budget must be between 1,000 and 100,000 milli + node hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Classification might stop training + before the entire training budget has been used. 
+ multi_label (bool): + If false, a single-label (multi-class) Model + will be trained (i.e. assuming that for each + image just up to one annotation may be + applicable). If true, a multi-label Model will + be trained (i.e. assuming that for each image + multiple annotations may be applicable). + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_TF_LOW_LATENCY_1 = 2 + MOBILE_TF_VERSATILE_1 = 3 + MOBILE_TF_HIGH_ACCURACY_1 = 4 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + base_model_id = proto.Field(proto.STRING, number=2) + + budget_milli_node_hours = proto.Field(proto.INT64, number=3) + + disable_early_stopping = proto.Field(proto.BOOL, number=4) + + multi_label = proto.Field(proto.BOOL, number=5) + + +class AutoMlImageClassificationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (~.automl_image_classification.AutoMlImageClassificationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + successful_stop_reason = proto.Field( + proto.ENUM, number=2, enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py new file mode 100644 index 0000000000..3fb9d3ae1d --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + }, +) + + +class AutoMlImageObjectDetection(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image Object + Detection Model. + + Attributes: + inputs (~.automl_image_object_detection.AutoMlImageObjectDetectionInputs): + The input parameters of this TrainingJob. 
+ metadata (~.automl_image_object_detection.AutoMlImageObjectDetectionMetadata): + The metadata information + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlImageObjectDetectionInputs", + ) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlImageObjectDetectionMetadata", + ) + + +class AutoMlImageObjectDetectionInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_image_object_detection.AutoMlImageObjectDetectionInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 20,000 and + 900,000 milli node hours, inclusive. The default value is + 216,000 which represents one day in wall time, considering 9 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the + training budget must be between 1,000 and 100,000 milli node + hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Object Detection might stop + training before the entire training budget has + been used. 
+ """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_LATENCY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + MOBILE_TF_VERSATILE_1 = 4 + MOBILE_TF_HIGH_ACCURACY_1 = 5 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + budget_milli_node_hours = proto.Field(proto.INT64, number=2) + + disable_early_stopping = proto.Field(proto.BOOL, number=3) + + +class AutoMlImageObjectDetectionMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (~.automl_image_object_detection.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + successful_stop_reason = proto.Field( + proto.ENUM, number=2, enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py new file mode 100644 index 0000000000..0fa3788b11 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + }, +) + + +class AutoMlImageSegmentation(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Segmentation Model. + + Attributes: + inputs (~.automl_image_segmentation.AutoMlImageSegmentationInputs): + The input parameters of this TrainingJob. + metadata (~.automl_image_segmentation.AutoMlImageSegmentationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlImageSegmentationInputs", + ) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlImageSegmentationMetadata", + ) + + +class AutoMlImageSegmentationInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_image_segmentation.AutoMlImageSegmentationInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. 
Or actual_wall_clock_hours = + train_budget_milli_node_hours / (number_of_nodes_involved \* + 1000) For modelType ``cloud-high-accuracy-1``\ (default), + the budget must be between 20,000 and 2,000,000 milli node + hours, inclusive. The default value is 192,000 which + represents one day in wall time (1000 milli \* 24 hours \* 8 + nodes). + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_ACCURACY_1 = 2 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + budget_milli_node_hours = proto.Field(proto.INT64, number=2) + + base_model_id = proto.Field(proto.STRING, number=3) + + +class AutoMlImageSegmentationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (~.automl_image_segmentation.AutoMlImageSegmentationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + successful_stop_reason = proto.Field( + proto.ENUM, number=2, enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py new file mode 100644 index 0000000000..55d620b32e --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -0,0 +1,447 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import ( + export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config, +) + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",}, +) + + +class AutoMlTables(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Tables Model. + + Attributes: + inputs (~.automl_tables.AutoMlTablesInputs): + The input parameters of this TrainingJob. 
+ metadata (~.automl_tables.AutoMlTablesMetadata): + The metadata information. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",) + + metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",) + + +class AutoMlTablesInputs(proto.Message): + r""" + + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "maximize-precision-at-recall". Must be between 0 and 1, + inclusive. + optimization_objective_precision_value (float): + Required when optimization_objective is + "maximize-recall-at-precision". Must be between 0 and 1, + inclusive. + prediction_type (str): + The type of prediction the Model is to + produce. "classification" - Predict one out of + multiple target values is + picked for each row. + "regression" - Predict a value based on its + relation to other values. This + type is available only to columns that contain + semantically numeric values, i.e. integers or + floating point number, even if + stored as e.g. strings. + target_column (str): + The column name of the target column that the + model is to predict. + transformations (Sequence[~.automl_tables.AutoMlTablesInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that maximizes/minimizes the value of the + objective function over the validation set. + + The supported optimization objectives depend on + the prediction type. If the field is not set, a + default objective function is used. + classification (binary): + "maximize-au-roc" (default) - Maximize the + area under the receiver + operating characteristic (ROC) curve. + "minimize-log-loss" - Minimize log loss. 
+ "maximize-au-prc" - Maximize the area under + the precision-recall curve. "maximize- + precision-at-recall" - Maximize precision for a + specified + recall value. "maximize-recall-at-precision" - + Maximize recall for a specified + precision value. + classification (multi-class): + "minimize-log-loss" (default) - Minimize log + loss. + regression: + "minimize-rmse" (default) - Minimize root- + mean-squared error (RMSE). "minimize-mae" - + Minimize mean-absolute error (MAE). "minimize- + rmsle" - Minimize root-mean-squared log error + (RMSLE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. + weight_column_name (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. 
+ export_evaluated_data_items_config (~.gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + """ + + class Transformation(proto.Message): + r""" + + Attributes: + auto (~.automl_tables.AutoMlTablesInputs.Transformation.AutoTransformation): + + numeric (~.automl_tables.AutoMlTablesInputs.Transformation.NumericTransformation): + + categorical (~.automl_tables.AutoMlTablesInputs.Transformation.CategoricalTransformation): + + timestamp (~.automl_tables.AutoMlTablesInputs.Transformation.TimestampTransformation): + + text (~.automl_tables.AutoMlTablesInputs.Transformation.TextTransformation): + + repeated_numeric (~.automl_tables.AutoMlTablesInputs.Transformation.NumericArrayTransformation): + + repeated_categorical (~.automl_tables.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): + + repeated_text (~.automl_tables.AutoMlTablesInputs.Transformation.TextArrayTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + - A boolean value that indicates whether the value is valid. 
+ + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from training data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + invalid_values_allowed = proto.Field(proto.BOOL, number=2) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day, and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. 
If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = + ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from training data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + time_format = proto.Field(proto.STRING, number=2) + + invalid_values_allowed = proto.Field(proto.BOOL, number=3) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each word to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting + embedding. + - Stop-words receive no special treatment and are not removed. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class NumericArrayTransformation(proto.Message): + r"""Treats the column as numerical array and performs following + transformation functions. + + - All transformations for Numerical types applied to the average of + all the elements. + - The average of empty arrays is treated as zero. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from training data. 
+ """ + + column_name = proto.Field(proto.STRING, number=1) + + invalid_values_allowed = proto.Field(proto.BOOL, number=2) + + class CategoricalArrayTransformation(proto.Message): + r"""Treats the column as categorical array and performs following + transformation functions. + + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class TextArrayTransformation(proto.Message): + r"""Treats the column as text array and performs following + transformation functions. + + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as + a single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.AutoTransformation", + ) + + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.NumericTransformation", + ) + + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.CategoricalTransformation", + ) + + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.TimestampTransformation", + ) + + text = proto.Field( + proto.MESSAGE, + number=5, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.TextTransformation", + ) + + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.NumericArrayTransformation", + ) + + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation", + ) + + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.TextArrayTransformation", + ) + + optimization_objective_recall_value = proto.Field( + proto.FLOAT, number=5, oneof="additional_optimization_objective_config" + ) + + optimization_objective_precision_value = proto.Field( + proto.FLOAT, number=6, oneof="additional_optimization_objective_config" + ) + + prediction_type = proto.Field(proto.STRING, number=1) + + target_column = proto.Field(proto.STRING, number=2) + + transformations = proto.RepeatedField( + proto.MESSAGE, number=3, message=Transformation, + ) + + optimization_objective = proto.Field(proto.STRING, number=4) + + 
train_budget_milli_node_hours = proto.Field(proto.INT64, number=7) + + disable_early_stopping = proto.Field(proto.BOOL, number=8) + + weight_column_name = proto.Field(proto.STRING, number=9) + + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=10, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + + +class AutoMlTablesMetadata(proto.Message): + r"""Model metadata specific to AutoML Tables. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. + """ + + train_cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py new file mode 100644 index 0000000000..ca75734600 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTextClassification", "AutoMlTextClassificationInputs",}, +) + + +class AutoMlTextClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Classification Model. + + Attributes: + inputs (~.automl_text_classification.AutoMlTextClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlTextClassificationInputs", + ) + + +class AutoMlTextClassificationInputs(proto.Message): + r""" + + Attributes: + multi_label (bool): + + """ + + multi_label = proto.Field(proto.BOOL, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py new file mode 100644 index 0000000000..336509af22 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTextExtraction", "AutoMlTextExtractionInputs",}, +) + + +class AutoMlTextExtraction(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Extraction Model. + + Attributes: + inputs (~.automl_text_extraction.AutoMlTextExtractionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextExtractionInputs",) + + +class AutoMlTextExtractionInputs(proto.Message): + r"""""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py new file mode 100644 index 0000000000..d5de97e2b2 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTextSentiment", "AutoMlTextSentimentInputs",}, +) + + +class AutoMlTextSentiment(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Sentiment Model. + + Attributes: + inputs (~.automl_text_sentiment.AutoMlTextSentimentInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextSentimentInputs",) + + +class AutoMlTextSentimentInputs(proto.Message): + r""" + + Attributes: + sentiment_max (int): + A sentiment is expressed as an integer + ordinal, where higher value means a more + positive sentiment. The range of sentiments that + will be used is between 0 and sentimentMax + (inclusive on both ends), and all the values in + the range must be represented in the dataset + before a model can be created. + Only the Annotations with this sentimentMax will + be used for training. sentimentMax value must be + between 1 and 10 (inclusive). + """ + + sentiment_max = proto.Field(proto.INT32, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py new file mode 100644 index 0000000000..d6969d93c6 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlVideoActionRecognition", "AutoMlVideoActionRecognitionInputs",}, +) + + +class AutoMlVideoActionRecognition(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video Action + Recognition Model. + + Attributes: + inputs (~.automl_video_action_recognition.AutoMlVideoActionRecognitionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlVideoActionRecognitionInputs", + ) + + +class AutoMlVideoActionRecognitionInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_video_action_recognition.AutoMlVideoActionRecognitionInputs.ModelType): + + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py new file mode 100644 index 0000000000..3164544d47 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlVideoClassification", "AutoMlVideoClassificationInputs",}, +) + + +class AutoMlVideoClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + Classification Model. + + Attributes: + inputs (~.automl_video_classification.AutoMlVideoClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlVideoClassificationInputs", + ) + + +class AutoMlVideoClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_video_classification.AutoMlVideoClassificationInputs.ModelType): + + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py new file mode 100644 index 0000000000..0fd8c7ec7a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlVideoObjectTracking", "AutoMlVideoObjectTrackingInputs",}, +) + + +class AutoMlVideoObjectTracking(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + ObjectTracking Model. + + Attributes: + inputs (~.automl_video_object_tracking.AutoMlVideoObjectTrackingInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlVideoObjectTrackingInputs", + ) + + +class AutoMlVideoObjectTrackingInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_video_object_tracking.AutoMlVideoObjectTrackingInputs.ModelType): + + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_CORAL_VERSATILE_1 = 3 + MOBILE_CORAL_LOW_LATENCY_1 = 4 + MOBILE_JETSON_VERSATILE_1 = 5 + MOBILE_JETSON_LOW_LATENCY_1 = 6 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py new file mode 100644 index 0000000000..29bc547adf --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"ExportEvaluatedDataItemsConfig",}, +) + + +class ExportEvaluatedDataItemsConfig(proto.Message): + r"""Configuration for exporting test set predictions to a + BigQuery table. + + Attributes: + destination_bigquery_uri (str): + URI of desired destination BigQuery table. If not specified, + then results are exported to the following auto-created + BigQuery table: + + :export_evaluated_examples__.evaluated_examples + override_existing_table (bool): + If true and an export destination is + specified, then the contents of the destination + will be overwritten. Otherwise, if the export + destination already exists, then the export + operation will not trigger and a failure + response is returned. + """ + + destination_bigquery_uri = proto.Field(proto.STRING, number=1) + + override_existing_table = proto.Field(proto.BOOL, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index f49f90f5eb..5f466b2e9b 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -229,6 +229,7 @@ "DataItem", "DataLabelingJob", "Dataset", + "DatasetServiceClient", "DedicatedResources", "DeleteBatchPredictionJobRequest", "DeleteCustomJobRequest", @@ -345,7 +346,6 @@ "SearchMigratableResourcesResponse", "SmoothGradConfig", "SpecialistPool", - "SpecialistPoolServiceClient", "StudySpec", "TimestampSplit", "TrainingConfig", @@ -365,5 +365,5 @@ "UserActionReference", "WorkerPoolSpec", "XraiAttribution", - "DatasetServiceClient", + "SpecialistPoolServiceClient", ) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index 775558e3b1..1927709f30 100644 --- 
a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -228,7 +228,7 @@ async def create_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -308,7 +308,7 @@ async def get_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -396,7 +396,7 @@ async def update_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -473,7 +473,7 @@ async def list_datasets( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_datasets, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -567,7 +567,7 @@ async def delete_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -662,7 +662,7 @@ async def import_data( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.import_data, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -755,7 +755,7 @@ async def export_data( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_data, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -839,7 +839,7 @@ async def list_data_items( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_data_items, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -919,7 +919,7 @@ async def get_annotation_spec( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_annotation_spec, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -996,7 +996,7 @@ async def list_annotations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_annotations, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index 8cceeb197c..56f567959a 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -112,34 +112,34 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, default_timeout=None, client_info=client_info, + self.create_dataset, default_timeout=5.0, client_info=client_info, ), self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, default_timeout=None, client_info=client_info, + self.get_dataset, default_timeout=5.0, client_info=client_info, ), self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, default_timeout=None, client_info=client_info, + self.update_dataset, default_timeout=5.0, client_info=client_info, ), self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, default_timeout=None, client_info=client_info, + self.list_datasets, default_timeout=5.0, client_info=client_info, ), self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, default_timeout=None, client_info=client_info, + self.delete_dataset, default_timeout=5.0, client_info=client_info, ), self.import_data: gapic_v1.method.wrap_method( - self.import_data, default_timeout=None, client_info=client_info, + self.import_data, default_timeout=5.0, client_info=client_info, ), self.export_data: gapic_v1.method.wrap_method( - self.export_data, default_timeout=None, client_info=client_info, + self.export_data, default_timeout=5.0, client_info=client_info, ), self.list_data_items: gapic_v1.method.wrap_method( - self.list_data_items, default_timeout=None, client_info=client_info, + self.list_data_items, default_timeout=5.0, client_info=client_info, ), self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, default_timeout=None, client_info=client_info, + self.get_annotation_spec, default_timeout=5.0, client_info=client_info, ), self.list_annotations: gapic_v1.method.wrap_method( - self.list_annotations, default_timeout=None, client_info=client_info, + self.list_annotations, default_timeout=5.0, client_info=client_info, ), } diff --git 
a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 9056e7a149..9c6af3bd16 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -219,7 +219,7 @@ async def create_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -300,7 +300,7 @@ async def get_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -376,7 +376,7 @@ async def list_endpoints( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_endpoints, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -464,7 +464,7 @@ async def update_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -554,7 +554,7 @@ async def delete_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -678,7 +678,7 @@ async def deploy_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.deploy_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -793,7 +793,7 @@ async def undeploy_model( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.undeploy_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index 63965464b7..e55589de8f 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -111,25 +111,25 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_endpoint: gapic_v1.method.wrap_method( - self.create_endpoint, default_timeout=None, client_info=client_info, + self.create_endpoint, default_timeout=5.0, client_info=client_info, ), self.get_endpoint: gapic_v1.method.wrap_method( - self.get_endpoint, default_timeout=None, client_info=client_info, + self.get_endpoint, default_timeout=5.0, client_info=client_info, ), self.list_endpoints: gapic_v1.method.wrap_method( - self.list_endpoints, default_timeout=None, client_info=client_info, + self.list_endpoints, default_timeout=5.0, client_info=client_info, ), self.update_endpoint: gapic_v1.method.wrap_method( - self.update_endpoint, default_timeout=None, client_info=client_info, + self.update_endpoint, default_timeout=5.0, client_info=client_info, ), self.delete_endpoint: gapic_v1.method.wrap_method( - self.delete_endpoint, default_timeout=None, client_info=client_info, + self.delete_endpoint, default_timeout=5.0, client_info=client_info, ), self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, default_timeout=None, client_info=client_info, + self.deploy_model, default_timeout=5.0, client_info=client_info, ), self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, default_timeout=None, client_info=client_info, + self.undeploy_model, default_timeout=5.0, 
client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index d988c81d3c..2a24748d11 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -252,7 +252,7 @@ async def create_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -330,7 +330,7 @@ async def get_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -406,7 +406,7 @@ async def list_custom_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_custom_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -500,7 +500,7 @@ async def delete_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -586,7 +586,7 @@ async def cancel_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -667,7 +667,7 @@ async def create_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -741,7 +741,7 @@ async def get_data_labeling_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -816,7 +816,7 @@ async def list_data_labeling_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_data_labeling_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -911,7 +911,7 @@ async def delete_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -987,7 +987,7 @@ async def cancel_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1070,7 +1070,7 @@ async def create_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1146,7 +1146,7 @@ async def get_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1222,7 +1222,7 @@ async def list_hyperparameter_tuning_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1317,7 +1317,7 @@ async def delete_hyperparameter_tuning_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1406,7 +1406,7 @@ async def cancel_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1493,7 +1493,7 @@ async def create_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1572,7 +1572,7 @@ async def get_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1648,7 +1648,7 @@ async def list_batch_prediction_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_batch_prediction_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1744,7 +1744,7 @@ async def delete_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1831,7 +1831,7 @@ async def cancel_batch_prediction_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index 04c05890bc..3d1f0be59b 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -124,93 +124,93 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, default_timeout=None, client_info=client_info, + self.create_custom_job, default_timeout=5.0, client_info=client_info, ), self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, default_timeout=None, client_info=client_info, + self.get_custom_job, default_timeout=5.0, client_info=client_info, ), self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, default_timeout=None, client_info=client_info, + self.list_custom_jobs, default_timeout=5.0, client_info=client_info, ), self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, default_timeout=None, client_info=client_info, + self.delete_custom_job, default_timeout=5.0, client_info=client_info, ), self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, default_timeout=None, client_info=client_info, + self.cancel_custom_job, default_timeout=5.0, client_info=client_info, ), self.create_data_labeling_job: gapic_v1.method.wrap_method( self.create_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_data_labeling_job: gapic_v1.method.wrap_method( self.get_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), 
self.list_data_labeling_jobs: gapic_v1.method.wrap_method( self.list_data_labeling_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_data_labeling_job: gapic_v1.method.wrap_method( self.delete_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_data_labeling_job: gapic_v1.method.wrap_method( self.cancel_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.create_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.get_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( self.list_hyperparameter_tuning_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.delete_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.cancel_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.create_batch_prediction_job: gapic_v1.method.wrap_method( self.create_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_batch_prediction_job: gapic_v1.method.wrap_method( self.get_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( self.list_batch_prediction_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_batch_prediction_job: gapic_v1.method.wrap_method( 
self.delete_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( self.cancel_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index 81c1f9cb51..3b27b6e184 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -235,7 +235,7 @@ async def upload_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.upload_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -313,7 +313,7 @@ async def get_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -389,7 +389,7 @@ async def list_models( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_models, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -476,7 +476,7 @@ async def update_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -568,7 +568,7 @@ async def delete_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -666,7 +666,7 @@ async def export_model( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -750,7 +750,7 @@ async def get_model_evaluation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model_evaluation, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -826,7 +826,7 @@ async def list_model_evaluations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_model_evaluations, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -908,7 +908,7 @@ async def get_model_evaluation_slice( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model_evaluation_slice, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -985,7 +985,7 @@ async def list_model_evaluation_slices( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_model_evaluation_slices, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index 681d035178..2f87fc98dd 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -113,41 +113,39 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, default_timeout=None, client_info=client_info, + self.upload_model, default_timeout=5.0, client_info=client_info, ), self.get_model: gapic_v1.method.wrap_method( - self.get_model, default_timeout=None, client_info=client_info, + self.get_model, default_timeout=5.0, client_info=client_info, ), self.list_models: gapic_v1.method.wrap_method( - self.list_models, default_timeout=None, client_info=client_info, + self.list_models, default_timeout=5.0, client_info=client_info, ), self.update_model: gapic_v1.method.wrap_method( - self.update_model, default_timeout=None, client_info=client_info, + self.update_model, default_timeout=5.0, client_info=client_info, ), self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, default_timeout=None, client_info=client_info, + self.delete_model, default_timeout=5.0, client_info=client_info, ), self.export_model: gapic_v1.method.wrap_method( - self.export_model, default_timeout=None, client_info=client_info, + self.export_model, default_timeout=5.0, client_info=client_info, ), self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=None, - client_info=client_info, + self.get_model_evaluation, default_timeout=5.0, client_info=client_info, ), self.list_model_evaluations: gapic_v1.method.wrap_method( self.list_model_evaluations, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_model_evaluation_slice: gapic_v1.method.wrap_method( self.get_model_evaluation_slice, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_model_evaluation_slices: gapic_v1.method.wrap_method( self.list_model_evaluation_slices, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py 
b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index d361b05e21..ef420aae0b 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -230,7 +230,7 @@ async def create_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -308,7 +308,7 @@ async def get_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -384,7 +384,7 @@ async def list_training_pipelines( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_training_pipelines, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -479,7 +479,7 @@ async def delete_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -567,7 +567,7 @@ async def cancel_training_pipeline( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index 1b235635f1..41123b8615 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -115,27 +115,27 @@ def _prep_wrapped_messages(self, client_info): self._wrapped_methods = { self.create_training_pipeline: gapic_v1.method.wrap_method( self.create_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_training_pipeline: gapic_v1.method.wrap_method( self.get_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_training_pipelines: gapic_v1.method.wrap_method( self.list_training_pipelines, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_training_pipeline: gapic_v1.method.wrap_method( self.delete_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_training_pipeline: gapic_v1.method.wrap_method( self.cancel_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index c82146bafa..bb58b0bfac 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -230,7 +230,7 @@ async def predict( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.predict, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -356,7 +356,7 @@ async def explain( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.explain, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index cdec1c11e5..0c82f7d83c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -107,10 +107,10 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.predict: gapic_v1.method.wrap_method( - self.predict, default_timeout=None, client_info=client_info, + self.predict, default_timeout=5.0, client_info=client_info, ), self.explain: gapic_v1.method.wrap_method( - self.explain, default_timeout=None, client_info=client_info, + self.explain, default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 77f40bd4ad..c693126d4c 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -236,7 +236,7 @@ async def create_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -327,7 +327,7 @@ async def get_specialist_pool( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -403,7 +403,7 @@ async def list_specialist_pools( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_specialist_pools, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -498,7 +498,7 @@ async def delete_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -597,7 +597,7 @@ async def update_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index 30fbd3030f..f1af058030 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -111,25 +111,25 @@ def _prep_wrapped_messages(self, client_info): self._wrapped_methods = { self.create_specialist_pool: gapic_v1.method.wrap_method( self.create_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_specialist_pool: gapic_v1.method.wrap_method( - self.get_specialist_pool, default_timeout=None, client_info=client_info, + self.get_specialist_pool, default_timeout=5.0, client_info=client_info, ), self.list_specialist_pools: gapic_v1.method.wrap_method( self.list_specialist_pools, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_specialist_pool: 
gapic_v1.method.wrap_method( self.delete_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.update_specialist_pool: gapic_v1.method.wrap_method( self.update_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 97e5625d20..c668a7be98 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -15,7 +15,23 @@ # limitations under the License. # +from .user_action_reference import UserActionReference +from .annotation import Annotation from .annotation_spec import AnnotationSpec +from .completion_stats import CompletionStats +from .explanation_metadata import ExplanationMetadata +from .explanation import ( + Explanation, + ModelExplanation, + Attribution, + ExplanationSpec, + ExplanationParameters, + SampledShapleyAttribution, + IntegratedGradientsAttribution, + XraiAttribution, + SmoothGradConfig, + FeatureNoiseSigma, +) from .io import ( GcsSource, GcsDestination, @@ -23,14 +39,6 @@ BigQueryDestination, ContainerRegistryDestination, ) -from .dataset import ( - Dataset, - ImportDataConfig, - ExportDataConfig, -) -from .manual_batch_tuning_parameters import ManualBatchTuningParameters -from .completion_stats import CompletionStats -from .model_evaluation_slice import ModelEvaluationSlice from .machine_resources import ( MachineSpec, DedicatedResources, @@ -39,21 +47,35 @@ ResourcesConsumed, DiskSpec, ) -from .deployed_model_ref import DeployedModelRef +from .manual_batch_tuning_parameters import ManualBatchTuningParameters +from .batch_prediction_job import BatchPredictionJob from .env_var import EnvVar -from .explanation_metadata import ExplanationMetadata -from .explanation import ( - Explanation, - ModelExplanation, - Attribution, - ExplanationSpec, - ExplanationParameters, - 
SampledShapleyAttribution, - IntegratedGradientsAttribution, - XraiAttribution, - SmoothGradConfig, - FeatureNoiseSigma, +from .custom_job import ( + CustomJob, + CustomJobSpec, + WorkerPoolSpec, + ContainerSpec, + PythonPackageSpec, + Scheduling, ) +from .data_item import DataItem +from .specialist_pool import SpecialistPool +from .data_labeling_job import ( + DataLabelingJob, + ActiveLearningConfig, + SampleConfig, + TrainingConfig, +) +from .dataset import ( + Dataset, + ImportDataConfig, + ExportDataConfig, +) +from .operation import ( + GenericOperationMetadata, + DeleteOperationMetadata, +) +from .deployed_model_ref import DeployedModelRef from .model import ( Model, PredictSchemata, @@ -68,36 +90,44 @@ PredefinedSplit, TimestampSplit, ) -from .model_evaluation import ModelEvaluation -from .migratable_resource import MigratableResource -from .operation import ( - GenericOperationMetadata, - DeleteOperationMetadata, -) -from .migration_service import ( - SearchMigratableResourcesRequest, - SearchMigratableResourcesResponse, - BatchMigrateResourcesRequest, - MigrateResourceRequest, - BatchMigrateResourcesResponse, - MigrateResourceResponse, - BatchMigrateResourcesOperationMetadata, +from .dataset_service import ( + CreateDatasetRequest, + CreateDatasetOperationMetadata, + GetDatasetRequest, + UpdateDatasetRequest, + ListDatasetsRequest, + ListDatasetsResponse, + DeleteDatasetRequest, + ImportDataRequest, + ImportDataResponse, + ImportDataOperationMetadata, + ExportDataRequest, + ExportDataResponse, + ExportDataOperationMetadata, + ListDataItemsRequest, + ListDataItemsResponse, + GetAnnotationSpecRequest, + ListAnnotationsRequest, + ListAnnotationsResponse, ) -from .batch_prediction_job import BatchPredictionJob -from .custom_job import ( - CustomJob, - CustomJobSpec, - WorkerPoolSpec, - ContainerSpec, - PythonPackageSpec, - Scheduling, +from .endpoint import ( + Endpoint, + DeployedModel, ) -from .specialist_pool import SpecialistPool -from .data_labeling_job 
import ( - DataLabelingJob, - ActiveLearningConfig, - SampleConfig, - TrainingConfig, +from .endpoint_service import ( + CreateEndpointRequest, + CreateEndpointOperationMetadata, + GetEndpointRequest, + ListEndpointsRequest, + ListEndpointsResponse, + UpdateEndpointRequest, + DeleteEndpointRequest, + DeployModelRequest, + DeployModelResponse, + DeployModelOperationMetadata, + UndeployModelRequest, + UndeployModelResponse, + UndeployModelOperationMetadata, ) from .study import ( Trial, @@ -131,51 +161,18 @@ DeleteBatchPredictionJobRequest, CancelBatchPredictionJobRequest, ) -from .user_action_reference import UserActionReference -from .annotation import Annotation -from .endpoint import ( - Endpoint, - DeployedModel, -) -from .prediction_service import ( - PredictRequest, - PredictResponse, - ExplainRequest, - ExplainResponse, -) -from .endpoint_service import ( - CreateEndpointRequest, - CreateEndpointOperationMetadata, - GetEndpointRequest, - ListEndpointsRequest, - ListEndpointsResponse, - UpdateEndpointRequest, - DeleteEndpointRequest, - DeployModelRequest, - DeployModelResponse, - DeployModelOperationMetadata, - UndeployModelRequest, - UndeployModelResponse, - UndeployModelOperationMetadata, -) -from .pipeline_service import ( - CreateTrainingPipelineRequest, - GetTrainingPipelineRequest, - ListTrainingPipelinesRequest, - ListTrainingPipelinesResponse, - DeleteTrainingPipelineRequest, - CancelTrainingPipelineRequest, -) -from .specialist_pool_service import ( - CreateSpecialistPoolRequest, - CreateSpecialistPoolOperationMetadata, - GetSpecialistPoolRequest, - ListSpecialistPoolsRequest, - ListSpecialistPoolsResponse, - DeleteSpecialistPoolRequest, - UpdateSpecialistPoolRequest, - UpdateSpecialistPoolOperationMetadata, +from .migratable_resource import MigratableResource +from .migration_service import ( + SearchMigratableResourcesRequest, + SearchMigratableResourcesResponse, + BatchMigrateResourcesRequest, + MigrateResourceRequest, + 
BatchMigrateResourcesResponse, + MigrateResourceResponse, + BatchMigrateResourcesOperationMetadata, ) +from .model_evaluation import ModelEvaluation +from .model_evaluation_slice import ModelEvaluationSlice from .model_service import ( UploadModelRequest, UploadModelOperationMetadata, @@ -195,50 +192,37 @@ ListModelEvaluationSlicesRequest, ListModelEvaluationSlicesResponse, ) -from .data_item import DataItem -from .dataset_service import ( - CreateDatasetRequest, - CreateDatasetOperationMetadata, - GetDatasetRequest, - UpdateDatasetRequest, - ListDatasetsRequest, - ListDatasetsResponse, - DeleteDatasetRequest, - ImportDataRequest, - ImportDataResponse, - ImportDataOperationMetadata, - ExportDataRequest, - ExportDataResponse, - ExportDataOperationMetadata, - ListDataItemsRequest, - ListDataItemsResponse, - GetAnnotationSpecRequest, - ListAnnotationsRequest, - ListAnnotationsResponse, +from .pipeline_service import ( + CreateTrainingPipelineRequest, + GetTrainingPipelineRequest, + ListTrainingPipelinesRequest, + ListTrainingPipelinesResponse, + DeleteTrainingPipelineRequest, + CancelTrainingPipelineRequest, +) +from .prediction_service import ( + PredictRequest, + PredictResponse, + ExplainRequest, + ExplainResponse, +) +from .specialist_pool_service import ( + CreateSpecialistPoolRequest, + CreateSpecialistPoolOperationMetadata, + GetSpecialistPoolRequest, + ListSpecialistPoolsRequest, + ListSpecialistPoolsResponse, + DeleteSpecialistPoolRequest, + UpdateSpecialistPoolRequest, + UpdateSpecialistPoolOperationMetadata, ) __all__ = ( + "UserActionReference", + "Annotation", "AnnotationSpec", - "GcsSource", - "GcsDestination", - "BigQuerySource", - "BigQueryDestination", - "ContainerRegistryDestination", - "Dataset", - "ImportDataConfig", - "ExportDataConfig", - "ManualBatchTuningParameters", "CompletionStats", - "ModelEvaluationSlice", - "MachineSpec", - "DedicatedResources", - "AutomaticResources", - "BatchDedicatedResources", - "ResourcesConsumed", - "DiskSpec", - 
"DeployedModelRef", - "EnvVar", "ExplanationMetadata", "Explanation", "ModelExplanation", @@ -250,39 +234,81 @@ "XraiAttribution", "SmoothGradConfig", "FeatureNoiseSigma", - "Model", - "PredictSchemata", - "ModelContainerSpec", - "Port", - "TrainingPipeline", - "InputDataConfig", - "FractionSplit", - "FilterSplit", - "PredefinedSplit", - "TimestampSplit", - "ModelEvaluation", - "MigratableResource", - "GenericOperationMetadata", - "DeleteOperationMetadata", - "SearchMigratableResourcesRequest", - "SearchMigratableResourcesResponse", - "BatchMigrateResourcesRequest", - "MigrateResourceRequest", - "BatchMigrateResourcesResponse", - "MigrateResourceResponse", - "BatchMigrateResourcesOperationMetadata", + "GcsSource", + "GcsDestination", + "BigQuerySource", + "BigQueryDestination", + "ContainerRegistryDestination", + "MachineSpec", + "DedicatedResources", + "AutomaticResources", + "BatchDedicatedResources", + "ResourcesConsumed", + "DiskSpec", + "ManualBatchTuningParameters", "BatchPredictionJob", + "EnvVar", "CustomJob", "CustomJobSpec", "WorkerPoolSpec", "ContainerSpec", "PythonPackageSpec", "Scheduling", + "DataItem", "SpecialistPool", "DataLabelingJob", "ActiveLearningConfig", "SampleConfig", "TrainingConfig", + "Dataset", + "ImportDataConfig", + "ExportDataConfig", + "GenericOperationMetadata", + "DeleteOperationMetadata", + "DeployedModelRef", + "Model", + "PredictSchemata", + "ModelContainerSpec", + "Port", + "TrainingPipeline", + "InputDataConfig", + "FractionSplit", + "FilterSplit", + "PredefinedSplit", + "TimestampSplit", + "CreateDatasetRequest", + "CreateDatasetOperationMetadata", + "GetDatasetRequest", + "UpdateDatasetRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", + "DeleteDatasetRequest", + "ImportDataRequest", + "ImportDataResponse", + "ImportDataOperationMetadata", + "ExportDataRequest", + "ExportDataResponse", + "ExportDataOperationMetadata", + "ListDataItemsRequest", + "ListDataItemsResponse", + "GetAnnotationSpecRequest", + 
"ListAnnotationsRequest", + "ListAnnotationsResponse", + "Endpoint", + "DeployedModel", + "CreateEndpointRequest", + "CreateEndpointOperationMetadata", + "GetEndpointRequest", + "ListEndpointsRequest", + "ListEndpointsResponse", + "UpdateEndpointRequest", + "DeleteEndpointRequest", + "DeployModelRequest", + "DeployModelResponse", + "DeployModelOperationMetadata", + "UndeployModelRequest", + "UndeployModelResponse", + "UndeployModelOperationMetadata", "Trial", "StudySpec", "Measurement", @@ -311,41 +337,16 @@ "ListBatchPredictionJobsResponse", "DeleteBatchPredictionJobRequest", "CancelBatchPredictionJobRequest", - "UserActionReference", - "Annotation", - "Endpoint", - "DeployedModel", - "PredictRequest", - "PredictResponse", - "ExplainRequest", - "ExplainResponse", - "CreateEndpointRequest", - "CreateEndpointOperationMetadata", - "GetEndpointRequest", - "ListEndpointsRequest", - "ListEndpointsResponse", - "UpdateEndpointRequest", - "DeleteEndpointRequest", - "DeployModelRequest", - "DeployModelResponse", - "DeployModelOperationMetadata", - "UndeployModelRequest", - "UndeployModelResponse", - "UndeployModelOperationMetadata", - "CreateTrainingPipelineRequest", - "GetTrainingPipelineRequest", - "ListTrainingPipelinesRequest", - "ListTrainingPipelinesResponse", - "DeleteTrainingPipelineRequest", - "CancelTrainingPipelineRequest", - "CreateSpecialistPoolRequest", - "CreateSpecialistPoolOperationMetadata", - "GetSpecialistPoolRequest", - "ListSpecialistPoolsRequest", - "ListSpecialistPoolsResponse", - "DeleteSpecialistPoolRequest", - "UpdateSpecialistPoolRequest", - "UpdateSpecialistPoolOperationMetadata", + "MigratableResource", + "SearchMigratableResourcesRequest", + "SearchMigratableResourcesResponse", + "BatchMigrateResourcesRequest", + "MigrateResourceRequest", + "BatchMigrateResourcesResponse", + "MigrateResourceResponse", + "BatchMigrateResourcesOperationMetadata", + "ModelEvaluation", + "ModelEvaluationSlice", "UploadModelRequest", "UploadModelOperationMetadata", 
"UploadModelResponse", @@ -363,23 +364,22 @@ "GetModelEvaluationSliceRequest", "ListModelEvaluationSlicesRequest", "ListModelEvaluationSlicesResponse", - "DataItem", - "CreateDatasetRequest", - "CreateDatasetOperationMetadata", - "GetDatasetRequest", - "UpdateDatasetRequest", - "ListDatasetsRequest", - "ListDatasetsResponse", - "DeleteDatasetRequest", - "ImportDataRequest", - "ImportDataResponse", - "ImportDataOperationMetadata", - "ExportDataRequest", - "ExportDataResponse", - "ExportDataOperationMetadata", - "ListDataItemsRequest", - "ListDataItemsResponse", - "GetAnnotationSpecRequest", - "ListAnnotationsRequest", - "ListAnnotationsResponse", + "CreateTrainingPipelineRequest", + "GetTrainingPipelineRequest", + "ListTrainingPipelinesRequest", + "ListTrainingPipelinesResponse", + "DeleteTrainingPipelineRequest", + "CancelTrainingPipelineRequest", + "PredictRequest", + "PredictResponse", + "ExplainRequest", + "ExplainResponse", + "CreateSpecialistPoolRequest", + "CreateSpecialistPoolOperationMetadata", + "GetSpecialistPoolRequest", + "ListSpecialistPoolsRequest", + "ListSpecialistPoolsResponse", + "DeleteSpecialistPoolRequest", + "UpdateSpecialistPoolRequest", + "UpdateSpecialistPoolOperationMetadata", ) diff --git a/noxfile.py b/noxfile.py index 1797beebfd..87765339b5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -28,7 +28,7 @@ DEFAULT_PYTHON_VERSION = "3.8" SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -81,9 +81,8 @@ def default(session): session.run( "py.test", "--quiet", - "--cov=google.cloud.aiplatform", - "--cov=google.cloud", - "--cov=tests.unit", + "--cov=google/cloud", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", diff --git a/synth.metadata b/synth.metadata index 9399d8c2e3..b39f24bbb9 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,15 +3,22 @@ { "git": { "name": ".", 
- "remote": "https://github.com/dizcology/python-aiplatform.git", - "sha": "81da030c0af8902fd54c8e7b5e92255a532d0efb" + "remote": "https://github.com/googleapis/python-aiplatform.git", + "sha": "688a06ff0bcc291cb63225787b7083e0b96b3615" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ba9918cd22874245b55734f57470c719b577e591" + "sha": "f94318521f63085b9ccb43d42af89f153fb39f15" + } + }, + { + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "f94318521f63085b9ccb43d42af89f153fb39f15" } } ], @@ -22,7 +29,7 @@ "apiName": "aiplatform", "apiVersion": "v1beta1", "language": "python", - "generator": "gapic-generator-python" + "generator": "bazel" } } ] diff --git a/synth.py b/synth.py index 8685e21af7..107235edac 100644 --- a/synth.py +++ b/synth.py @@ -37,12 +37,26 @@ s.move( library, excludes=[ + ".pre-commit-config.yaml", "setup.py", "README.rst", "docs/index.rst", + "docs/definition_v1beta1/services.rst", + "docs/instance_v1beta1/services.rst", + "docs/params_v1beta1/services.rst", + "docs/prediction_v1beta1/services.rst", "scripts/fixup_aiplatform_v1beta1_keywords.py", + "scripts/fixup_definition_v1beta1_keywords.py", + "scripts/fixup_instance_v1beta1_keywords.py", + "scripts/fixup_params_v1beta1_keywords.py", + "scripts/fixup_prediction_v1beta1_keywords.py", "google/cloud/aiplatform/__init__.py", + "google/cloud/aiplatform/v1beta1/schema/**/services/", "tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py", + "tests/unit/gapic/definition_v1beta1/", + "tests/unit/gapic/instance_v1beta1/", + "tests/unit/gapic/params_v1beta1/", + "tests/unit/gapic/prediction_v1beta1/", ], )