diff --git a/README.rst b/README.rst
index bfab72eb..af1699ec 100644
--- a/README.rst
+++ b/README.rst
@@ -56,7 +56,9 @@ Python >= 3.5
 Deprecated Python Versions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Python == 2.7. Python 2.7 support will be removed on January 1, 2020.
+Python == 2.7.
+
+The last version of this library compatible with Python 2.7 is google-cloud-automl==1.0.1.
 
 
 Mac/Linux
@@ -80,18 +82,6 @@ Windows
     \Scripts\activate
     \Scripts\pip.exe install google-cloud-automl
 
-Example Usage
-~~~~~~~~~~~~~
-
-.. code-block:: python
-
-    from google.cloud.automl_v1beta1 import PredictionServiceClient
-
-    client = PredictionServiceClient()
-    model_path = client.model_path('my-project-123', 'us-central', 'model-name')
-    payload = {...}
-    params = {'foo': 1}
-    response = client.predict(model_path, payload, params=params)
 
 Next Steps
 ~~~~~~~~~~
@@ -100,32 +90,3 @@ Next Steps
    API to see other available methods on the client.
 - Read the `Product documentation`_ to learn more about the product and see How-to Guides.
-
-Making & Testing Local Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you want to make changes to this library, here is how to set up your
-development environment:
-
-1. Make sure you have `virtualenv`_ installed and activated as shown above.
-2. Run the following one-time setup (it will be persisted in your virtualenv):
-
-   .. code-block:: console
-
-      pip install -r ../docs/requirements.txt
-      pip install -U nox mock pytest
-
-3. If you want to run all tests, you will need a billing-enabled
-   `GCP project`_, and a `service account`_ with access to the AutoML APIs.
-   Note: the first time the tests run in a new project it will take a _long_
-   time, on the order of 2-3 hours. This is one-time setup that will be skipped
-   in future runs.
-
-.. _service account: https://cloud.google.com/iam/docs/creating-managing-service-accounts
-.. _GCP project: https://cloud.google.com/resource-manager/docs/creating-managing-projects
-
-.. code-block:: console
-
-   export PROJECT_ID= GOOGLE_APPLICATION_CREDENTIALS=
-   nox
-
diff --git a/UPGRADING.md b/UPGRADING.md
new file mode 100644
index 00000000..d7b3ec05
--- /dev/null
+++ b/UPGRADING.md
@@ -0,0 +1,269 @@
+# 2.0.0 Migration Guide
+
+The 2.0.0 release of the `google-cloud-automl` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and it includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage.
+
+If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-automl/issues).
+
+## Supported Python Versions
+
+> **WARNING**: Breaking change
+
+The 2.0.0 release requires Python 3.6+.
+
+
+## Method Calls
+
+> **WARNING**: Breaking change
+
+Methods now expect request objects. We provide a script that will convert most common use cases.
+
+* Install the library:
+
+```sh
+python3 -m pip install google-cloud-automl
+```
+
+* The script `fixup_automl_{version}_keywords.py` is shipped with the library. It expects
+an input directory (with the code to convert) and an empty destination directory.
+
+```sh
+$ fixup_automl_v1_keywords.py --input-directory .samples/ --output-directory samples/
+```
+
+**Before:**
+```py
+from google.cloud import automl
+
+project_id = "YOUR_PROJECT_ID"
+model_id = "YOUR_MODEL_ID"
+
+client = automl.AutoMlClient()
+# Get the full path of the model.
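+# (model_path is available on both the 1.x and 2.x AutoMlClient; it builds the
+# fully-qualified name "projects/{project}/locations/{location}/models/{model}".)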
+model_full_id = client.model_path(project_id, "us-central1", model_id)
+response = client.deploy_model(model_full_id)
+```
+
+
+**After:**
+```py
+from google.cloud import automl
+
+project_id = "YOUR_PROJECT_ID"
+model_id = "YOUR_MODEL_ID"
+
+client = automl.AutoMlClient()
+# Get the full path of the model.
+model_full_id = client.model_path(project_id, "us-central1", model_id)
+response = client.deploy_model(name=model_full_id)
+```
+
+### More Details
+
+In `google-cloud-automl<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters.
+
+**Before:**
+```py
+    def batch_predict(
+        self,
+        name,
+        input_config,
+        output_config,
+        params=None,
+        retry=google.api_core.gapic_v1.method.DEFAULT,
+        timeout=google.api_core.gapic_v1.method.DEFAULT,
+        metadata=None,
+    ):
+```
+
+In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional.
+
+Some methods have additional keyword-only parameters. The available parameters depend on the [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/2db5725bf898b544a0cf951e1694d3b0fce5eda3/google/cloud/automl/v1/prediction_service.proto#L86) specified by the API producer.
+
+
+**After:**
+```py
+    def batch_predict(
+        self,
+        request: prediction_service.BatchPredictRequest = None,
+        *,
+        name: str = None,
+        input_config: io.BatchPredictInputConfig = None,
+        output_config: io.BatchPredictOutputConfig = None,
+        params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+```
+
+> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive.
+> Passing both will result in an error.
+
+
+Both of these calls are valid:
+
+```py
+response = client.batch_predict(
+    request={
+        "name": name,
+        "input_config": input_config,
+        "output_config": output_config,
+        "params": params,
+    }
+)
+```
+
+```py
+response = client.batch_predict(
+    name=name,
+    input_config=input_config,
+    output_config=output_config,
+    params=params,
+)
+```
+
+This call is invalid because it mixes `request` with the keyword argument `params`. Executing this code
+will result in an error.
+
+```py
+response = client.batch_predict(
+    request={
+        "name": name,
+        "input_config": input_config,
+        "output_config": output_config,
+    },
+    params=params,
+)
+```
+
+
+The method `list_datasets` takes an argument `filter` instead of `filter_`.
+
+**Before**
+```py
+from google.cloud import automl
+
+project_id = "PROJECT_ID"
+
+client = automl.AutoMlClient()
+project_location = client.location_path(project_id, "us-central1")
+
+# List all the datasets available in the region.
+response = client.list_datasets(project_location, filter_="")
+```
+
+**After**
+```py
+from google.cloud import automl
+
+project_id = "PROJECT_ID"
+client = automl.AutoMlClient()
+# A resource that represents a Google Cloud Platform location.
+project_location = f"projects/{project_id}/locations/us-central1"
+
+# List all the datasets available in the region.
+response = client.list_datasets(parent=project_location, filter="")
+```
+
+### Changes to v1beta1 Tables Client
+
+Optional arguments are now keyword-only arguments and *must* be passed by name.
+See [PEP 3102](https://www.python.org/dev/peps/pep-3102/).
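+
+For example, a Tables `predict` call that passed `model` positionally in 1.x now raises a
+`TypeError` under 2.x; pass it by keyword instead. A minimal sketch (here `my_inputs` and
+`my_model` are hypothetical placeholders):
+
+```py
+from google.cloud import automl_v1beta1 as automl
+
+client = automl.TablesClient(project="my-project", region="us-central1")
+
+# 1.x accepted the optional argument positionally:
+#     client.predict(my_inputs, my_model)
+# 2.x raises TypeError for that call because `model` is keyword-only.
+prediction = client.predict(my_inputs, model=my_model)
+```
+
+The old and new signatures are shown below.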
+
+**Before**
+```py
+    def predict(
+        self,
+        inputs,
+        model=None,
+        model_name=None,
+        model_display_name=None,
+        feature_importance=False,
+        project=None,
+        region=None,
+        **kwargs
+    ):
+```
+
+**After**
+```py
+    def predict(
+        self,
+        inputs,
+        *,
+        model=None,
+        model_name=None,
+        model_display_name=None,
+        feature_importance=False,
+        project=None,
+        region=None,
+        **kwargs,
+    ):
+```
+
+`**kwargs` passed to methods must be either (1) kwargs on the underlying method (`retry`, `timeout`, or `metadata`) or (2) attributes of the request object.
+
+The following call is valid because `filter` is an attribute of `automl_v1beta1.ListDatasetsRequest`.
+
+```py
+from google.cloud import automl_v1beta1 as automl
+
+client = automl.TablesClient(project=project_id, region=compute_region)
+
+# List all the datasets available in the region by applying a filter.
+response = client.list_datasets(filter=filter)
+```
+
+
+
+## Enums and Types
+
+
+> **WARNING**: Breaking change
+
+The submodules `enums` and `types` have been removed.
+
+**Before:**
+```py
+
+from google.cloud import automl
+
+gcs_source = automl.types.GcsSource(input_uris=["gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"])
+deployment_state = automl.enums.Model.DeploymentState.DEPLOYED
+```
+
+
+**After:**
+```py
+from google.cloud import automl
+
+gcs_source = automl.GcsSource(input_uris=["gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"])
+deployment_state = automl.Model.DeploymentState.DEPLOYED
+```
+
+
+## Resource Path Helper Methods
+
+The following resource name helpers have been removed. Please construct the strings manually.
+
+```py
+from google.cloud import automl
+
+project = "my-project"
+location = "us-central1"
+dataset = "my-dataset"
+model = "my-model"
+annotation_spec = "test-annotation"
+model_evaluation = "test-evaluation"
+
+# AutoMlClient
+annotation_spec_path = f"projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
+location_path = f"projects/{project}/locations/{location}"
+model_evaluation_path = f"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}"
+
+# PredictionServiceClient
+model_path = f"projects/{project}/locations/{location}/models/{model}"
+# Alternatively, you can use `model_path` from AutoMlClient.
+model_path = automl.AutoMlClient.model_path(project, location, model)
+
+```
\ No newline at end of file
diff --git a/docs/UPGRADING.md b/docs/UPGRADING.md
new file mode 120000
index 00000000..01097c8c
--- /dev/null
+++ b/docs/UPGRADING.md
@@ -0,0 +1 @@
+../UPGRADING.md
\ No newline at end of file
diff --git a/docs/automl_v1/services.rst b/docs/automl_v1/services.rst
new file mode 100644
index 00000000..b57ca45e
--- /dev/null
+++ b/docs/automl_v1/services.rst
@@ -0,0 +1,9 @@
+Services for Google Cloud Automl v1 API
+=======================================
+
+.. automodule:: google.cloud.automl_v1.services.auto_ml
+    :members:
+    :inherited-members:
+.. automodule:: google.cloud.automl_v1.services.prediction_service
+    :members:
+    :inherited-members:
diff --git a/docs/automl_v1/types.rst b/docs/automl_v1/types.rst
new file mode 100644
index 00000000..47a76a80
--- /dev/null
+++ b/docs/automl_v1/types.rst
@@ -0,0 +1,5 @@
+Types for Google Cloud Automl v1 API
+====================================
+
+.. 
automodule:: google.cloud.automl_v1.types + :members: diff --git a/docs/automl_v1beta1/services.rst b/docs/automl_v1beta1/services.rst new file mode 100644 index 00000000..787e8566 --- /dev/null +++ b/docs/automl_v1beta1/services.rst @@ -0,0 +1,12 @@ +Services for Google Cloud Automl v1beta1 API +============================================ + +.. automodule:: google.cloud.automl_v1beta1.services.auto_ml + :members: + :inherited-members: +.. automodule:: google.cloud.automl_v1beta1.services.prediction_service + :members: + :inherited-members: +.. automodule:: google.cloud.automl_v1beta1.services.tables + :members: + :inherited-members: diff --git a/docs/automl_v1beta1/types.rst b/docs/automl_v1beta1/types.rst new file mode 100644 index 00000000..bf190b5b --- /dev/null +++ b/docs/automl_v1beta1/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Automl v1beta1 API +========================================= + +.. automodule:: google.cloud.automl_v1beta1.types + :members: diff --git a/docs/gapic/v1/api.rst b/docs/gapic/v1/api.rst deleted file mode 100644 index 757fc1a0..00000000 --- a/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud AutoML API -=========================== - -.. automodule:: google.cloud.automl_v1 - :members: - :inherited-members: \ No newline at end of file diff --git a/docs/gapic/v1/types.rst b/docs/gapic/v1/types.rst deleted file mode 100644 index 5fd25134..00000000 --- a/docs/gapic/v1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud AutoML API Client -================================= - -.. automodule:: google.cloud.automl_v1.types - :members: \ No newline at end of file diff --git a/docs/gapic/v1beta1/api.rst b/docs/gapic/v1beta1/api.rst deleted file mode 100644 index 268baa5b..00000000 --- a/docs/gapic/v1beta1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud AutoML API -=========================== - -.. automodule:: google.cloud.automl_v1beta1 - :members: - :inherited-members: \ No newline at end of file diff --git a/docs/gapic/v1beta1/tables.rst b/docs/gapic/v1beta1/tables.rst deleted file mode 100644 index 54ed6a20..00000000 --- a/docs/gapic/v1beta1/tables.rst +++ /dev/null @@ -1,5 +0,0 @@ -A tables-specific client for AutoML -=================================== - -.. automodule:: google.cloud.automl_v1beta1.tables.tables_client - :members: diff --git a/docs/gapic/v1beta1/types.rst b/docs/gapic/v1beta1/types.rst deleted file mode 100644 index 27ce6644..00000000 --- a/docs/gapic/v1beta1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud AutoML API Client -================================= - -.. automodule:: google.cloud.automl_v1beta1.types - :members: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 5473e0d7..05219311 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,8 +10,8 @@ v1 API Reference .. toctree:: :maxdepth: 2 - gapic/v1/api - gapic/v1/types + automl_v1/services + automl_v1/types Previous beta release v1beta1 is provided as well. @@ -22,9 +22,19 @@ v1beta1 API Reference .. toctree:: :maxdepth: 2 - gapic/v1beta1/api - gapic/v1beta1/types - gapic/v1beta1/tables + automl_v1beta1/services + automl_v1beta1/types + + +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. 
toctree:: + :maxdepth: 2 + + UPGRADING @@ -37,3 +47,5 @@ For a list of all ``google-cloud-automl`` releases: :maxdepth: 2 changelog + + diff --git a/google/cloud/automl/__init__.py b/google/cloud/automl/__init__.py new file mode 100644 index 00000000..e6e6a762 --- /dev/null +++ b/google/cloud/automl/__init__.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.cloud.automl_v1.services.auto_ml.async_client import AutoMlAsyncClient +from google.cloud.automl_v1.services.auto_ml.client import AutoMlClient +from google.cloud.automl_v1.services.prediction_service.async_client import ( + PredictionServiceAsyncClient, +) +from google.cloud.automl_v1.services.prediction_service.client import ( + PredictionServiceClient, +) +from google.cloud.automl_v1.types.annotation_payload import AnnotationPayload +from google.cloud.automl_v1.types.annotation_spec import AnnotationSpec +from google.cloud.automl_v1.types.classification import ClassificationAnnotation +from google.cloud.automl_v1.types.classification import ClassificationEvaluationMetrics +from google.cloud.automl_v1.types.classification import ClassificationType +from google.cloud.automl_v1.types.data_items import Document +from google.cloud.automl_v1.types.data_items import DocumentDimensions +from google.cloud.automl_v1.types.data_items import ExamplePayload +from google.cloud.automl_v1.types.data_items import Image +from google.cloud.automl_v1.types.data_items import TextSnippet +from google.cloud.automl_v1.types.dataset import Dataset +from google.cloud.automl_v1.types.detection import BoundingBoxMetricsEntry +from google.cloud.automl_v1.types.detection import ImageObjectDetectionAnnotation +from google.cloud.automl_v1.types.detection import ImageObjectDetectionEvaluationMetrics +from google.cloud.automl_v1.types.geometry import BoundingPoly +from google.cloud.automl_v1.types.geometry import NormalizedVertex +from google.cloud.automl_v1.types.image import ImageClassificationDatasetMetadata +from google.cloud.automl_v1.types.image import ( + ImageClassificationModelDeploymentMetadata, +) +from google.cloud.automl_v1.types.image import ImageClassificationModelMetadata +from google.cloud.automl_v1.types.image import ImageObjectDetectionDatasetMetadata +from google.cloud.automl_v1.types.image import ( + ImageObjectDetectionModelDeploymentMetadata, +) +from google.cloud.automl_v1.types.image import ImageObjectDetectionModelMetadata +from google.cloud.automl_v1.types.io import BatchPredictInputConfig +from google.cloud.automl_v1.types.io import BatchPredictOutputConfig +from google.cloud.automl_v1.types.io import DocumentInputConfig +from google.cloud.automl_v1.types.io import GcsDestination +from google.cloud.automl_v1.types.io import GcsSource +from google.cloud.automl_v1.types.io import InputConfig +from google.cloud.automl_v1.types.io import ModelExportOutputConfig +from google.cloud.automl_v1.types.io import OutputConfig +from 
google.cloud.automl_v1.types.model import Model +from google.cloud.automl_v1.types.model_evaluation import ModelEvaluation +from google.cloud.automl_v1.types.operations import BatchPredictOperationMetadata +from google.cloud.automl_v1.types.operations import CreateDatasetOperationMetadata +from google.cloud.automl_v1.types.operations import CreateModelOperationMetadata +from google.cloud.automl_v1.types.operations import DeleteOperationMetadata +from google.cloud.automl_v1.types.operations import DeployModelOperationMetadata +from google.cloud.automl_v1.types.operations import ExportDataOperationMetadata +from google.cloud.automl_v1.types.operations import ExportModelOperationMetadata +from google.cloud.automl_v1.types.operations import ImportDataOperationMetadata +from google.cloud.automl_v1.types.operations import OperationMetadata +from google.cloud.automl_v1.types.operations import UndeployModelOperationMetadata +from google.cloud.automl_v1.types.prediction_service import BatchPredictRequest +from google.cloud.automl_v1.types.prediction_service import BatchPredictResult +from google.cloud.automl_v1.types.prediction_service import PredictRequest +from google.cloud.automl_v1.types.prediction_service import PredictResponse +from google.cloud.automl_v1.types.service import CreateDatasetRequest +from google.cloud.automl_v1.types.service import CreateModelRequest +from google.cloud.automl_v1.types.service import DeleteDatasetRequest +from google.cloud.automl_v1.types.service import DeleteModelRequest +from google.cloud.automl_v1.types.service import DeployModelRequest +from google.cloud.automl_v1.types.service import ExportDataRequest +from google.cloud.automl_v1.types.service import ExportModelRequest +from google.cloud.automl_v1.types.service import GetAnnotationSpecRequest +from google.cloud.automl_v1.types.service import GetDatasetRequest +from google.cloud.automl_v1.types.service import GetModelEvaluationRequest +from google.cloud.automl_v1.types.service import GetModelRequest +from google.cloud.automl_v1.types.service import ImportDataRequest +from google.cloud.automl_v1.types.service import ListDatasetsRequest +from google.cloud.automl_v1.types.service import ListDatasetsResponse +from google.cloud.automl_v1.types.service import ListModelEvaluationsRequest +from google.cloud.automl_v1.types.service import ListModelEvaluationsResponse +from google.cloud.automl_v1.types.service import ListModelsRequest +from google.cloud.automl_v1.types.service import ListModelsResponse +from google.cloud.automl_v1.types.service import UndeployModelRequest +from google.cloud.automl_v1.types.service import UpdateDatasetRequest +from google.cloud.automl_v1.types.service import UpdateModelRequest +from google.cloud.automl_v1.types.text import TextClassificationDatasetMetadata +from google.cloud.automl_v1.types.text import TextClassificationModelMetadata +from google.cloud.automl_v1.types.text import TextExtractionDatasetMetadata +from google.cloud.automl_v1.types.text import TextExtractionModelMetadata +from google.cloud.automl_v1.types.text import TextSentimentDatasetMetadata +from google.cloud.automl_v1.types.text import TextSentimentModelMetadata +from google.cloud.automl_v1.types.text_extraction import TextExtractionAnnotation +from google.cloud.automl_v1.types.text_extraction import TextExtractionEvaluationMetrics +from google.cloud.automl_v1.types.text_segment import TextSegment +from google.cloud.automl_v1.types.text_sentiment import TextSentimentAnnotation +from 
google.cloud.automl_v1.types.text_sentiment import TextSentimentEvaluationMetrics +from google.cloud.automl_v1.types.translation import TranslationAnnotation +from google.cloud.automl_v1.types.translation import TranslationDatasetMetadata +from google.cloud.automl_v1.types.translation import TranslationEvaluationMetrics +from google.cloud.automl_v1.types.translation import TranslationModelMetadata + +__all__ = ( + "AnnotationPayload", + "AnnotationSpec", + "AutoMlAsyncClient", + "AutoMlClient", + "BatchPredictInputConfig", + "BatchPredictOperationMetadata", + "BatchPredictOutputConfig", + "BatchPredictRequest", + "BatchPredictResult", + "BoundingBoxMetricsEntry", + "BoundingPoly", + "ClassificationAnnotation", + "ClassificationEvaluationMetrics", + "ClassificationType", + "CreateDatasetOperationMetadata", + "CreateDatasetRequest", + "CreateModelOperationMetadata", + "CreateModelRequest", + "Dataset", + "DeleteDatasetRequest", + "DeleteModelRequest", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", + "DeployModelRequest", + "Document", + "DocumentDimensions", + "DocumentInputConfig", + "ExamplePayload", + "ExportDataOperationMetadata", + "ExportDataRequest", + "ExportModelOperationMetadata", + "ExportModelRequest", + "GcsDestination", + "GcsSource", + "GetAnnotationSpecRequest", + "GetDatasetRequest", + "GetModelEvaluationRequest", + "GetModelRequest", + "Image", + "ImageClassificationDatasetMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionAnnotation", + "ImageObjectDetectionDatasetMetadata", + "ImageObjectDetectionEvaluationMetrics", + "ImageObjectDetectionModelDeploymentMetadata", + "ImageObjectDetectionModelMetadata", + "ImportDataOperationMetadata", + "ImportDataRequest", + "InputConfig", + "ListDatasetsRequest", + "ListDatasetsResponse", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", + "ListModelsRequest", + "ListModelsResponse", + "Model", + "ModelEvaluation", + "ModelExportOutputConfig", + "NormalizedVertex", + "OperationMetadata", + "OutputConfig", + "PredictRequest", + "PredictResponse", + "PredictionServiceAsyncClient", + "PredictionServiceClient", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionAnnotation", + "TextExtractionDatasetMetadata", + "TextExtractionEvaluationMetrics", + "TextExtractionModelMetadata", + "TextSegment", + "TextSentimentAnnotation", + "TextSentimentDatasetMetadata", + "TextSentimentEvaluationMetrics", + "TextSentimentModelMetadata", + "TextSnippet", + "TranslationAnnotation", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", + "UndeployModelOperationMetadata", + "UndeployModelRequest", + "UpdateDatasetRequest", + "UpdateModelRequest", +) diff --git a/google/cloud/automl/py.typed b/google/cloud/automl/py.typed new file mode 100644 index 00000000..0560ba18 --- /dev/null +++ b/google/cloud/automl/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-automl package uses inline types. diff --git a/google/cloud/automl_v1/__init__.py b/google/cloud/automl_v1/__init__.py index 3c9ade66..b5f76f81 100644 --- a/google/cloud/automl_v1/__init__.py +++ b/google/cloud/automl_v1/__init__.py @@ -1,52 +1,189 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.automl_v1 import types -from google.cloud.automl_v1.gapic import auto_ml_client -from google.cloud.automl_v1.gapic import enums -from google.cloud.automl_v1.gapic import prediction_service_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class PredictionServiceClient(prediction_service_client.PredictionServiceClient): - __doc__ = prediction_service_client.PredictionServiceClient.__doc__ - enums = enums - - -class AutoMlClient(auto_ml_client.AutoMlClient): - __doc__ = auto_ml_client.AutoMlClient.__doc__ - enums = enums +from .services.auto_ml import AutoMlClient +from .services.prediction_service import PredictionServiceClient +from .types.annotation_payload import AnnotationPayload +from .types.annotation_spec import AnnotationSpec +from .types.classification import ClassificationAnnotation +from .types.classification import ClassificationEvaluationMetrics +from .types.classification import ClassificationType +from .types.data_items import Document +from .types.data_items import DocumentDimensions +from .types.data_items import ExamplePayload +from .types.data_items import Image +from .types.data_items import TextSnippet +from .types.dataset import Dataset +from .types.detection import BoundingBoxMetricsEntry +from .types.detection import ImageObjectDetectionAnnotation +from .types.detection import ImageObjectDetectionEvaluationMetrics +from .types.geometry import BoundingPoly +from .types.geometry import NormalizedVertex +from .types.image import ImageClassificationDatasetMetadata +from .types.image import ImageClassificationModelDeploymentMetadata +from .types.image import ImageClassificationModelMetadata +from .types.image import ImageObjectDetectionDatasetMetadata +from .types.image import ImageObjectDetectionModelDeploymentMetadata +from .types.image import ImageObjectDetectionModelMetadata +from .types.io import BatchPredictInputConfig +from .types.io import BatchPredictOutputConfig +from .types.io import DocumentInputConfig +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import InputConfig +from .types.io import ModelExportOutputConfig +from .types.io import OutputConfig +from .types.model import Model +from .types.model_evaluation import ModelEvaluation +from .types.operations import BatchPredictOperationMetadata +from .types.operations import CreateDatasetOperationMetadata +from .types.operations import CreateModelOperationMetadata +from .types.operations import DeleteOperationMetadata +from .types.operations import DeployModelOperationMetadata +from .types.operations import ExportDataOperationMetadata +from .types.operations import ExportModelOperationMetadata +from .types.operations import ImportDataOperationMetadata 
+from .types.operations import OperationMetadata +from .types.operations import UndeployModelOperationMetadata +from .types.prediction_service import BatchPredictRequest +from .types.prediction_service import BatchPredictResult +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.service import CreateDatasetRequest +from .types.service import CreateModelRequest +from .types.service import DeleteDatasetRequest +from .types.service import DeleteModelRequest +from .types.service import DeployModelRequest +from .types.service import ExportDataRequest +from .types.service import ExportModelRequest +from .types.service import GetAnnotationSpecRequest +from .types.service import GetDatasetRequest +from .types.service import GetModelEvaluationRequest +from .types.service import GetModelRequest +from .types.service import ImportDataRequest +from .types.service import ListDatasetsRequest +from .types.service import ListDatasetsResponse +from .types.service import ListModelEvaluationsRequest +from .types.service import ListModelEvaluationsResponse +from .types.service import ListModelsRequest +from .types.service import ListModelsResponse +from .types.service import UndeployModelRequest +from .types.service import UpdateDatasetRequest +from .types.service import UpdateModelRequest +from .types.text import TextClassificationDatasetMetadata +from .types.text import TextClassificationModelMetadata +from .types.text import TextExtractionDatasetMetadata +from .types.text import TextExtractionModelMetadata +from .types.text import TextSentimentDatasetMetadata +from .types.text import TextSentimentModelMetadata +from .types.text_extraction import TextExtractionAnnotation +from .types.text_extraction import TextExtractionEvaluationMetrics +from .types.text_segment import TextSegment +from .types.text_sentiment import TextSentimentAnnotation +from .types.text_sentiment import TextSentimentEvaluationMetrics +from .types.translation import TranslationAnnotation +from .types.translation import TranslationDatasetMetadata +from .types.translation import TranslationEvaluationMetrics +from .types.translation import TranslationModelMetadata __all__ = ( - "enums", - "types", + "AnnotationPayload", + "AnnotationSpec", + "BatchPredictInputConfig", + "BatchPredictOperationMetadata", + "BatchPredictOutputConfig", + "BatchPredictRequest", + "BatchPredictResult", + "BoundingBoxMetricsEntry", + "BoundingPoly", + "ClassificationAnnotation", + "ClassificationEvaluationMetrics", + "ClassificationType", + "CreateDatasetOperationMetadata", + "CreateDatasetRequest", + "CreateModelOperationMetadata", + "CreateModelRequest", + "Dataset", + "DeleteDatasetRequest", + "DeleteModelRequest", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", + "DeployModelRequest", + "Document", + "DocumentDimensions", + "DocumentInputConfig", + "ExamplePayload", + "ExportDataOperationMetadata", + "ExportDataRequest", + "ExportModelOperationMetadata", + "ExportModelRequest", + "GcsDestination", + "GcsSource", + "GetAnnotationSpecRequest", + "GetDatasetRequest", + "GetModelEvaluationRequest", + "GetModelRequest", + "Image", + "ImageClassificationDatasetMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionAnnotation", + "ImageObjectDetectionDatasetMetadata", + "ImageObjectDetectionEvaluationMetrics", + "ImageObjectDetectionModelDeploymentMetadata", + "ImageObjectDetectionModelMetadata", + 
"ImportDataOperationMetadata", + "ImportDataRequest", + "InputConfig", + "ListDatasetsRequest", + "ListDatasetsResponse", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", + "ListModelsRequest", + "ListModelsResponse", + "Model", + "ModelEvaluation", + "ModelExportOutputConfig", + "NormalizedVertex", + "OperationMetadata", + "OutputConfig", + "PredictRequest", + "PredictResponse", "PredictionServiceClient", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionAnnotation", + "TextExtractionDatasetMetadata", + "TextExtractionEvaluationMetrics", + "TextExtractionModelMetadata", + "TextSegment", + "TextSentimentAnnotation", + "TextSentimentDatasetMetadata", + "TextSentimentEvaluationMetrics", + "TextSentimentModelMetadata", + "TextSnippet", + "TranslationAnnotation", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", + "UndeployModelOperationMetadata", + "UndeployModelRequest", + "UpdateDatasetRequest", + "UpdateModelRequest", "AutoMlClient", ) diff --git a/google/cloud/automl_v1/gapic/auto_ml_client.py b/google/cloud/automl_v1/gapic/auto_ml_client.py deleted file mode 100644 index a870b6bf..00000000 --- a/google/cloud/automl_v1/gapic/auto_ml_client.py +++ /dev/null @@ -1,1911 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.automl.v1 AutoMl API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.automl_v1.gapic import auto_ml_client_config -from google.cloud.automl_v1.gapic import enums -from google.cloud.automl_v1.gapic.transports import auto_ml_grpc_transport -from google.cloud.automl_v1.proto import annotation_spec_pb2 -from google.cloud.automl_v1.proto import data_items_pb2 -from google.cloud.automl_v1.proto import dataset_pb2 -from google.cloud.automl_v1.proto import image_pb2 -from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import model_evaluation_pb2 -from google.cloud.automl_v1.proto import model_pb2 -from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.automl_v1.proto import prediction_service_pb2 -from google.cloud.automl_v1.proto import prediction_service_pb2_grpc -from google.cloud.automl_v1.proto import service_pb2 -from google.cloud.automl_v1.proto import service_pb2_grpc -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version - - -class AutoMlClient(object): - """ - AutoML Server API. - - The resource names are assigned by the server. The server never reuses - names that it has created after the resources with those names are - deleted. - - An ID of a resource is the last element of the item's resource name. For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - SERVICE_ADDRESS = "automl.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.automl.v1.AutoMl" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def annotation_spec_path(cls, project, location, dataset, annotation_spec): - """Return a fully-qualified annotation_spec string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}", - project=project, - location=location, - dataset=dataset, - annotation_spec=annotation_spec, - ) - - @classmethod - def dataset_path(cls, project, location, dataset): - """Return a fully-qualified dataset string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/datasets/{dataset}", - project=project, - location=location, - dataset=dataset, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def model_path(cls, project, location, model): - """Return a fully-qualified model string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/models/{model}", - project=project, - location=location, - model=model, - ) - - @classmethod - def model_evaluation_path(cls, project, location, model, model_evaluation): - """Return a fully-qualified model_evaluation string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}", - project=project, - location=location, - model=model, - model_evaluation=model_evaluation, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.AutoMlGrpcTransport, - Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = auto_ml_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_dataset( - self, - parent, - dataset, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a dataset. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} - >>> - >>> response = client.create_dataset(parent, dataset) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The resource name of the project to create the dataset for. - dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): Required. The dataset to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Dataset` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "create_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_dataset, - default_retry=self._method_configs["CreateDataset"].retry, - default_timeout=self._method_configs["CreateDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - dataset_pb2.Dataset, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_dataset( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a dataset. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.get_dataset(name) - - Args: - name (str): Required. The resource name of the dataset to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "get_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_dataset, - default_retry=self._method_configs["GetDataset"].retry, - default_timeout=self._method_configs["GetDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetDatasetRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_datasets( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists datasets in a project. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_datasets(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_datasets(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The resource name of the project from which to list datasets. - filter_ (str): An expression for filtering the results of the request. - - - ``dataset_metadata`` - for existence of the case (e.g. - ````image_classification_dataset_metadata````). Some examples of using the - filter are: - - - ``translation_dataset_metadata:*`` --> The dataset has - translation_dataset_metadata. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1.types.Dataset` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_datasets" not in self._inner_api_calls: - self._inner_api_calls[ - "list_datasets" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_datasets, - default_retry=self._method_configs["ListDatasets"].retry, - default_timeout=self._method_configs["ListDatasets"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_datasets"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="datasets", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_dataset( - self, - dataset, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a dataset. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_dataset(dataset, update_mask) - - Args: - dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): Required. The dataset which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Dataset` - update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "update_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("dataset.name", dataset.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_dataset( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.delete_dataset(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the dataset to delete. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_dataset, - default_retry=self._method_configs["DeleteDataset"].retry, - default_timeout=self._method_configs["DeleteDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.DeleteDatasetRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["delete_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def import_data( - self, - name, - input_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # TODO: Initialize `input_config`: - >>> input_config = {} - >>> - >>> response = client.import_data(name, input_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Dataset name. Dataset must already exist. All imported - annotations and examples will be added. - input_config (Union[dict, ~google.cloud.automl_v1.types.InputConfig]): Required. The desired input location and its domain specific semantics, - if any. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.InputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "import_data" not in self._inner_api_calls: - self._inner_api_calls[ - "import_data" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.import_data, - default_retry=self._method_configs["ImportData"].retry, - default_timeout=self._method_configs["ImportData"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["import_data"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def export_data( - self, - name, - output_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Exports dataset's data to the provided output location. Returns an - empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_data(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the dataset. - output_config (Union[dict, ~google.cloud.automl_v1.types.OutputConfig]): Required. The desired output location. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.OutputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "export_data" not in self._inner_api_calls: - self._inner_api_calls[ - "export_data" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_data, - default_retry=self._method_configs["ExportData"].retry, - default_timeout=self._method_configs["ExportData"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["export_data"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_annotation_spec( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets an annotation spec. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') - >>> - >>> response = client.get_annotation_spec(name) - - Args: - name (str): Required. The resource name of the annotation spec to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.AnnotationSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_annotation_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "get_annotation_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_annotation_spec, - default_retry=self._method_configs["GetAnnotationSpec"].retry, - default_timeout=self._method_configs["GetAnnotationSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetAnnotationSpecRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_annotation_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_model( - self, - parent, - model, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a model. Returns a Model in the ``response`` field when it - completes. 
When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> response = client.create_model(parent, model) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1.types.Model]): Required. The model to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Model` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_model" not in self._inner_api_calls: - self._inner_api_calls[ - "create_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_model, - default_retry=self._method_configs["CreateModel"].retry, - default_timeout=self._method_configs["CreateModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.CreateModelRequest(parent=parent, model=model,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - model_pb2.Model, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a model. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.get_model(name) - - Args: - name (str): Required. Resource name of the model. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Model` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_model" not in self._inner_api_calls: - self._inner_api_calls[ - "get_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model, - default_retry=self._method_configs["GetModel"].retry, - default_timeout=self._method_configs["GetModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetModelRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_models( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists models. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_models(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_models(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. Resource name of the project, from which to list the models. - filter_ (str): An expression for filtering the results of the request. - - - ``model_metadata`` - for existence of the case (e.g. - ````video_classification_model_metadata:*````). - - - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - - ``image_classification_model_metadata:*`` --> The model has - image_classification_model_metadata. - - - ``dataset_id=5`` --> The model was created from a dataset with ID 5. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. 
- An iterable of :class:`~google.cloud.automl_v1.types.Model` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_models" not in self._inner_api_calls: - self._inner_api_calls[ - "list_models" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_models, - default_retry=self._method_configs["ListModels"].retry, - default_timeout=self._method_configs["ListModels"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_models"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="model", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model being deleted. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
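A 2.x-style counterpart to the deleted `delete_model` documented above; a minimal sketch with a placeholder model name:

```py
from google.cloud import automl

client = automl.AutoMlClient()
model_name = "projects/my-project/locations/us-central1/models/MDL0000000000000000000"  # placeholder

operation = client.delete_model(name=model_name)
operation.result()  # resolves to google.protobuf.Empty once deletion finishes
```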
- if "delete_model" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.DeleteModelRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["delete_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def update_model( - self, - model, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a model. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_model(model, update_mask) - - Args: - model (Union[dict, ~google.cloud.automl_v1.types.Model]): Required. The model which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Model` - update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Model` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_model" not in self._inner_api_calls: - self._inner_api_calls[ - "update_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_model, - default_retry=self._method_configs["UpdateModel"].retry, - default_timeout=self._method_configs["UpdateModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateModelRequest(model=model, update_mask=update_mask,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("model.name", model.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def deploy_model( - self, - name, - image_object_detection_model_deployment_metadata=None, - image_classification_model_deployment_metadata=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deploys a model. If a model is already deployed, deploying it with - the same parameters has no effect. Deploying with different parametrs - (as e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. - - Only applicable for Text Classification, Image Object Detection , - Tables, and Image Segmentation; all other domains manage deployment - automatically. - - Returns an empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.deploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model to deploy. - image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata` - image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.ImageClassificationModelDeploymentMetadata` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "deploy_model" not in self._inner_api_calls: - self._inner_api_calls[ - "deploy_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.deploy_model, - default_retry=self._method_configs["DeployModel"].retry, - default_timeout=self._method_configs["DeployModel"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - - request = service_pb2.DeployModelRequest( - name=name, - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["deploy_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def undeploy_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. - - Returns an empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.undeploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model to undeploy. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "undeploy_model" not in self._inner_api_calls: - self._inner_api_calls[ - "undeploy_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.undeploy_model, - default_retry=self._method_configs["UndeployModel"].retry, - default_timeout=self._method_configs["UndeployModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UndeployModelRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["undeploy_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def export_model( - self, - name, - output_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Exports a trained, "export-able", model to a user specified Google - Cloud Storage location. A model is considered export-able if and only if - it has an export format defined for it in ``ModelExportOutputConfig``. - - Returns an empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_model(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the model to export. - output_config (Union[dict, ~google.cloud.automl_v1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.ModelExportOutputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
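A 2.x-style sketch for the deleted `export_model` documented above, assuming an export-able model and a placeholder destination bucket:

```py
from google.cloud import automl

client = automl.AutoMlClient()
model_name = "projects/my-project/locations/us-central1/models/MDL0000000000000000000"  # placeholder

# ModelExportOutputConfig as a dict; the bucket is a placeholder.
output_config = {"gcs_destination": {"output_uri_prefix": "gs://my-bucket/model-export/"}}
operation = client.export_model(name=model_name, output_config=output_config)
operation.result()
```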
- if "export_model" not in self._inner_api_calls: - self._inner_api_calls[ - "export_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_model, - default_retry=self._method_configs["ExportModel"].retry, - default_timeout=self._method_configs["ExportModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ExportModelRequest( - name=name, output_config=output_config, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["export_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_model_evaluation( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a model evaluation. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') - >>> - >>> response = client.get_model_evaluation(name) - - Args: - name (str): Required. Resource name for the model evaluation. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.ModelEvaluation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_model_evaluation" not in self._inner_api_calls: - self._inner_api_calls[ - "get_model_evaluation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model_evaluation, - default_retry=self._method_configs["GetModelEvaluation"].retry, - default_timeout=self._method_configs["GetModelEvaluation"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetModelEvaluationRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_model_evaluation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_model_evaluations( - self, - parent, - filter_, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists model evaluations. 
- - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `filter_`: - >>> filter_ = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_model_evaluations(parent, filter_): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_model_evaluations(parent, filter_).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations from across all - models of the parent location. - filter_ (str): Required. An expression for filtering the results of the request. - - - ``annotation_spec_id`` - for =, != or existence. See example below - for the last. - - Some examples of using the filter are: - - - ``annotation_spec_id!=4`` --> The model evaluation was done for - annotation spec with ID different than 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1.types.ModelEvaluation` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
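Note that the deleted signature above used `filter_` (with a trailing underscore) to dodge the Python builtin; the 2.x surface drops that workaround. A minimal sketch of the equivalent paginated call, with a placeholder model name:

```py
from google.cloud import automl

client = automl.AutoMlClient()
model_name = "projects/my-project/locations/us-central1/models/MDL0000000000000000000"  # placeholder

# 1.x: client.list_model_evaluations(model_name, filter_="")
# 2.x: keyword-only, and the parameter is named `filter`.
for evaluation in client.list_model_evaluations(parent=model_name, filter=""):
    print(evaluation.name)
```

The returned pager handles `next_page_token` transparently, so iterating over it walks every page.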
- if "list_model_evaluations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_model_evaluations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_model_evaluations, - default_retry=self._method_configs["ListModelEvaluations"].retry, - default_timeout=self._method_configs["ListModelEvaluations"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_model_evaluations"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="model_evaluation", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/automl_v1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1/gapic/auto_ml_client_config.py deleted file mode 100644 index 0c89b881..00000000 --- a/google/cloud/automl_v1/gapic/auto_ml_client_config.py +++ /dev/null @@ -1,132 +0,0 @@ -config = { - "interfaces": { - "google.cloud.automl.v1.AutoMl": { - "retry_codes": { - "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "no_retry_codes": [], - "no_retry_1_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 5000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000, - "total_timeout_millis": 5000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 5000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000, - "total_timeout_millis": 5000, - }, - }, - "methods": { - "CreateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "GetDataset": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListDatasets": { - "timeout_millis": 50000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "UpdateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "DeleteDataset": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ImportData": { - "timeout_millis": 20000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "ExportData": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "GetAnnotationSpec": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - 
"retry_params_name": "retry_policy_1_params", - }, - "CreateModel": { - "timeout_millis": 20000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "GetModel": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListModels": { - "timeout_millis": 50000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteModel": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "UpdateModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "DeployModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "UndeployModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "ExportModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "GetModelEvaluation": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListModelEvaluations": { - "timeout_millis": 50000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - }, - } - } -} diff --git a/google/cloud/automl_v1/gapic/enums.py b/google/cloud/automl_v1/gapic/enums.py deleted file mode 100644 index 7bb5f2cb..00000000 --- a/google/cloud/automl_v1/gapic/enums.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class ClassificationType(enum.IntEnum): - """ - Type of the classification problem. - - Attributes: - CLASSIFICATION_TYPE_UNSPECIFIED (int): An un-set value of this enum. - MULTICLASS (int): At most one label is allowed per example. - MULTILABEL (int): Multiple labels are allowed for one example. - """ - - CLASSIFICATION_TYPE_UNSPECIFIED = 0 - MULTICLASS = 1 - MULTILABEL = 2 - - -class Document(object): - class Layout(object): - class TextSegmentType(enum.IntEnum): - """ - The type of TextSegment in the context of the original document. - - Attributes: - TEXT_SEGMENT_TYPE_UNSPECIFIED (int): Should not be used. - TOKEN (int): The text segment is a token. e.g. word. - PARAGRAPH (int): The text segment is a paragraph. - FORM_FIELD (int): The text segment is a form field. - FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be - treated as child of another FORM_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM_FIELD. - FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. 
It will - be treated as child of another FORM_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM_FIELD. - TABLE (int): The text segment is a whole table, including headers, and all rows. - TABLE_HEADER (int): The text segment is a table's headers. It will be treated as child of - another TABLE TextSegment if its span is subspan of another TextSegment - with type TABLE. - TABLE_ROW (int): The text segment is a row in table. It will be treated as child of - another TABLE TextSegment if its span is subspan of another TextSegment - with type TABLE. - TABLE_CELL (int): The text segment is a cell in table. It will be treated as child of - another TABLE_ROW TextSegment if its span is subspan of another - TextSegment with type TABLE_ROW. - """ - - TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 - TOKEN = 1 - PARAGRAPH = 2 - FORM_FIELD = 3 - FORM_FIELD_NAME = 4 - FORM_FIELD_CONTENTS = 5 - TABLE = 6 - TABLE_HEADER = 7 - TABLE_ROW = 8 - TABLE_CELL = 9 - - -class DocumentDimensions(object): - class DocumentDimensionUnit(enum.IntEnum): - """ - Unit of the document dimension. - - Attributes: - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED (int): Should not be used. - INCH (int): Document dimension is measured in inches. - CENTIMETER (int): Document dimension is measured in centimeters. - POINT (int): Document dimension is measured in points. 72 points = 1 inch. - """ - - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 - INCH = 1 - CENTIMETER = 2 - POINT = 3 - - -class Model(object): - class DeploymentState(enum.IntEnum): - """ - Deployment state of the model. - - Attributes: - DEPLOYMENT_STATE_UNSPECIFIED (int): Should not be used, an un-set enum has this value by default. - DEPLOYED (int): Model is deployed. - UNDEPLOYED (int): Model is not deployed. - """ - - DEPLOYMENT_STATE_UNSPECIFIED = 0 - DEPLOYED = 1 - UNDEPLOYED = 2 diff --git a/google/cloud/automl_v1/gapic/prediction_service_client.py b/google/cloud/automl_v1/gapic/prediction_service_client.py deleted file mode 100644 index 06686df3..00000000 --- a/google/cloud/automl_v1/gapic/prediction_service_client.py +++ /dev/null @@ -1,534 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
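The `enums` module deleted above has no direct replacement file; in 2.x the generated enum types are exposed on the versioned package itself. A minimal sketch of the equivalent lookups, assuming the 2.x surface:

```py
from google.cloud import automl_v1

# 1.x: from google.cloud.automl_v1.gapic import enums
#      enums.ClassificationType.MULTICLASS
# 2.x (assumed layout): enums hang off the package, nested enums off their message.
classification_type = automl_v1.ClassificationType.MULTICLASS
deployment_state = automl_v1.Model.DeploymentState.DEPLOYED
print(classification_type, deployment_state)
```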
- -"""Accesses the google.cloud.automl.v1 PredictionService API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.path_template -import grpc - -from google.cloud.automl_v1.gapic import enums -from google.cloud.automl_v1.gapic import prediction_service_client_config -from google.cloud.automl_v1.gapic.transports import prediction_service_grpc_transport -from google.cloud.automl_v1.proto import data_items_pb2 -from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.automl_v1.proto import prediction_service_pb2 -from google.cloud.automl_v1.proto import prediction_service_pb2_grpc -from google.longrunning import operations_pb2 as longrunning_operations_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version - - -class PredictionServiceClient(object): - """ - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - SERVICE_ADDRESS = "automl.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.automl.v1.PredictionService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def model_path(cls, project, location, model): - """Return a fully-qualified model string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/models/{model}", - project=project, - location=location, - model=model, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.PredictionServiceGrpcTransport, - Callable[[~.Credentials, type], ~.PredictionServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. 
- credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = prediction_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=prediction_service_grpc_transport.PredictionServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = prediction_service_grpc_transport.PredictionServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def predict( - self, - name, - payload, - params=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Perform an online prediction. The prediction result is directly - returned in the response. 
Available for the following ML scenarios, and
-        their expected request payloads:
-
-        AutoML Vision Classification
-
-        -  An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
-
-        AutoML Vision Object Detection
-
-        -  An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
-
-        AutoML Natural Language Classification
-
-        -  A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
-           .PDF, .TIF or .TIFF format with size up to 2MB.
-
-        AutoML Natural Language Entity Extraction
-
-        -  A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a
-           document in .PDF, .TIF or .TIFF format with size up to 20MB.
-
-        AutoML Natural Language Sentiment Analysis
-
-        -  A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
-           .PDF, .TIF or .TIFF format with size up to 2MB.
-
-        AutoML Translation
-
-        -  A TextSnippet up to 25,000 characters, UTF-8 encoded.
-
-        AutoML Tables
-
-        -  A row with column values matching the columns of the model, up to
-           5MB. Not available for FORECASTING ``prediction_type``.
-
-        Example:
-            >>> from google.cloud import automl_v1
-            >>>
-            >>> client = automl_v1.PredictionServiceClient()
-            >>>
-            >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
-            >>>
-            >>> # TODO: Initialize `payload`:
-            >>> payload = {}
-            >>>
-            >>> response = client.predict(name, payload)
-
-        Args:
-            name (str): Required. Name of the model requested to serve the prediction.
-            payload (Union[dict, ~google.cloud.automl_v1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the
-                problem type that the model was trained to solve.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.automl_v1.types.ExamplePayload`
-            params (dict[str -> str]): Additional domain-specific parameters, any string must be up to
-                25000 characters long.
-
-                AutoML Vision Classification
-
-                ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model
-                makes predictions for an image, it will only produce results that have
-                at least this confidence score. The default is 0.5.
-
-                AutoML Vision Object Detection
-
-                ``score_threshold`` : (float) When Model detects objects on the image,
-                it will only produce bounding boxes which have at least this confidence
-                score. Value in 0 to 1 range, default is 0.5.
-
-                ``max_bounding_box_count`` : (int64) The maximum number of bounding
-                boxes returned. The default is 100. The number of returned bounding
-                boxes might be limited by the server.
-
-                AutoML Tables
-
-                ``feature_importance`` : (boolean) Whether ``feature_importance`` is
-                populated in the returned list of ``TablesAnnotation`` objects. The
-                default is false.
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.cloud.automl_v1.types.PredictResponse` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
- """ - # Wrap the transport method to add retry and timeout logic. - if "predict" not in self._inner_api_calls: - self._inner_api_calls[ - "predict" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.predict, - default_retry=self._method_configs["Predict"].retry, - default_timeout=self._method_configs["Predict"].timeout, - client_info=self._client_info, - ) - - request = prediction_service_pb2.PredictRequest( - name=name, payload=payload, params=params, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["predict"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def batch_predict( - self, - name, - input_config, - output_config, - params=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Perform a batch prediction. Unlike the online ``Predict``, batch - prediction result won't be immediately available in the response. - Instead, a long running operation object is returned. User can poll the - operation result via ``GetOperation`` method. Once the operation is - done, ``BatchPredictResult`` is returned in the ``response`` field. - Available for following ML scenarios: - - - AutoML Vision Classification - - AutoML Vision Object Detection - - AutoML Video Intelligence Classification - - AutoML Video Intelligence Object Tracking \* AutoML Natural Language - Classification - - AutoML Natural Language Entity Extraction - - AutoML Natural Language Sentiment Analysis - - AutoML Tables - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.PredictionServiceClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `input_config`: - >>> input_config = {} - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.batch_predict(name, input_config, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Name of the model requested to serve the batch prediction. - input_config (Union[dict, ~google.cloud.automl_v1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.BatchPredictInputConfig` - output_config (Union[dict, ~google.cloud.automl_v1.types.BatchPredictOutputConfig]): Required. The Configuration specifying where output predictions should - be written. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.BatchPredictOutputConfig` - params (dict[str -> str]): Additional domain-specific parameters for the predictions, any - string must be up to 25000 characters long. - - AutoML Natural Language Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model - makes predictions for a text snippet, it will only produce results that - have at least this confidence score. The default is 0.5. 
- - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model - makes predictions for an image, it will only produce results that have - at least this confidence score. The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When the model detects objects in the image, - it will only produce bounding boxes that have at least this confidence - score. Value in 0 to 1 range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number of bounding - boxes returned per image. The default is 100; the number of bounding - boxes returned might be limited by the server. - - AutoML Video Intelligence Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model - makes predictions for a video, it will only produce results that have at - least this confidence score. The default is 0.5. - - ``segment_classification`` : (boolean) Set to true to request - segment-level classification. AutoML Video Intelligence returns labels - and their confidence scores for the entire segment of the video that the - user specified in the request configuration. The default is true. - - ``shot_classification`` : (boolean) Set to true to request shot-level - classification. AutoML Video Intelligence determines the boundaries for - each camera shot in the entire segment of the video that the user specified - in the request configuration. AutoML Video Intelligence then returns - labels and their confidence scores for each detected shot, along with - the start and end time of the shot. The default is false. - - WARNING: Model evaluation is not done for this classification type; its - quality depends on the training data, but there are no metrics - provided to describe that quality. - - ``1s_interval_classification`` : (boolean) Set to true to request - classification for a video at one-second intervals. AutoML Video - Intelligence returns labels and their confidence scores for each second - of the entire segment of the video that the user specified in the request - configuration. The default is false. - - WARNING: Model evaluation is not done for this classification type; its - quality depends on the training data, but there are no metrics - provided to describe that quality. - - AutoML Video Intelligence Object Tracking - - ``score_threshold`` : (float) When the model detects objects in video - frames, it will only produce bounding boxes that have at least this - confidence score. Value in 0 to 1 range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number of bounding - boxes returned per image. The default is 100; the number of bounding - boxes returned might be limited by the server. - - ``min_bounding_box_size`` : (float) Only bounding boxes whose shortest - edge is at least this long, as a relative value of the video frame size, - are returned. Value in 0 to 1 range. Default is 0. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance.
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "batch_predict" not in self._inner_api_calls: - self._inner_api_calls[ - "batch_predict" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.batch_predict, - default_retry=self._method_configs["BatchPredict"].retry, - default_timeout=self._method_configs["BatchPredict"].timeout, - client_info=self._client_info, - ) - - request = prediction_service_pb2.BatchPredictRequest( - name=name, - input_config=input_config, - output_config=output_config, - params=params, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["batch_predict"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - prediction_service_pb2.BatchPredictResult, - metadata_type=proto_operations_pb2.OperationMetadata, - ) diff --git a/google/cloud/automl_v1/gapic/prediction_service_client_config.py b/google/cloud/automl_v1/gapic/prediction_service_client_config.py deleted file mode 100644 index e4b1a44f..00000000 --- a/google/cloud/automl_v1/gapic/prediction_service_client_config.py +++ /dev/null @@ -1,39 +0,0 @@ -config = { - "interfaces": { - "google.cloud.automl.v1.PredictionService": { - "retry_codes": {"no_retry_2_codes": [], "no_retry_codes": []}, - "retry_params": { - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_2_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - }, - "methods": { - "Predict": { - "timeout_millis": 60000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "BatchPredict": { - "timeout_millis": 20000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - }, - } - } -} diff --git a/google/cloud/automl_v1/gapic/transports/__init__.py b/google/cloud/automl_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py deleted file mode 100644 index 6ebffac5..00000000 --- a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.automl_v1.proto import service_pb2_grpc - - -class AutoMlGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.automl.v1 AutoMl API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="automl.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "auto_ml_stub": service_pb2_grpc.AutoMlStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="automl.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. 
- """ - return self._channel - - @property - def create_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. - - Creates a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].CreateDataset - - @property - def get_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. - - Gets a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetDataset - - @property - def list_datasets(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. - - Lists datasets in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ListDatasets - - @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - - Updates a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].UpdateDataset - - @property - def delete_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeleteDataset - - @property - def import_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ImportData - - @property - def export_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - - Exports dataset's data to the provided output location. Returns an - empty response in the ``response`` field when it completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ExportData - - @property - def get_annotation_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_annotation_spec`. - - Gets an annotation spec. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetAnnotationSpec - - @property - def create_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_model`. - - Creates a model. Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].CreateModel - - @property - def get_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_model`. - - Gets a model. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetModel - - @property - def list_models(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_models`. - - Lists models. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ListModels - - @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. - - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeleteModel - - @property - def update_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_model`. - - Updates a model. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].UpdateModel - - @property - def deploy_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - - Deploys a model. If a model is already deployed, deploying it with - the same parameters has no effect. Deploying with different parameters - (for example, changing ``node_number``) will reset the deployment state - without pausing the model's availability. - - Only applicable for Text Classification, Image Object Detection, - Tables, and Image Segmentation; all other domains manage deployment - automatically. - - Returns an empty response in the ``response`` field when it completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeployModel - - @property - def undeploy_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.undeploy_model`. - - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. - - Returns an empty response in the ``response`` field when it completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].UndeployModel - - @property - def export_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - - Exports a trained, exportable model to a user-specified Google - Cloud Storage location. A model is considered exportable if and only if - it has an export format defined for it in ``ModelExportOutputConfig``. - - Returns an empty response in the ``response`` field when it completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object.
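As the ``deploy_model`` stub above notes, deployment is a long-running operation for the domains that require it. A hedged sketch of that call flow with the legacy client removed in this diff (all identifiers are placeholders):

```py
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
name = client.model_path("my-project", "us-central1", "my-model")

# deploy_model returns a long-running operation; block until it completes.
operation = client.deploy_model(name)
operation.result()
```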
- """ - return self._stubs["auto_ml_stub"].ExportModel - - @property - def get_model_evaluation(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`. - - Gets a model evaluation. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetModelEvaluation - - @property - def list_model_evaluations(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. - - Lists model evaluations. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ListModelEvaluations diff --git a/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py deleted file mode 100644 index c94538be..00000000 --- a/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.automl_v1.proto import prediction_service_pb2_grpc - - -class PredictionServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.automl.v1 PredictionService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="automl.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="automl.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def predict(self): - """Return the gRPC stub for :meth:`PredictionServiceClient.predict`. - - Perform an online prediction. The prediction result is directly - returned in the response. Available for the following ML scenarios, and - their expected request payloads: - - AutoML Vision Classification - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. - - AutoML Vision Object Detection - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. - - AutoML Natural Language Classification - - - A TextSnippet up to 60,000 characters, UTF-8 encoded, or a document in - .PDF, .TIF or .TIFF format with size up to 2MB. - - AutoML Natural Language Entity Extraction - - - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded, or a - document in .PDF, .TIF or .TIFF format with size up to 20MB. - - AutoML Natural Language Sentiment Analysis - - - A TextSnippet up to 60,000 characters, UTF-8 encoded, or a document in - .PDF, .TIF or .TIFF format with size up to 2MB. - - AutoML Translation - - - A TextSnippet up to 25,000 characters, UTF-8 encoded. - - AutoML Tables - - - A row with column values matching the columns of the model, up to - 5MB. Not available for FORECASTING ``prediction_type``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["prediction_service_stub"].Predict - - @property - def batch_predict(self): - """Return the gRPC stub for :meth:`PredictionServiceClient.batch_predict`. - - Perform a batch prediction. Unlike the online ``Predict``, the batch - prediction result is not immediately available in the response. - Instead, a long-running operation object is returned. Users can poll the - operation result via the ``GetOperation`` method. Once the operation is - done, ``BatchPredictResult`` is returned in the ``response`` field.
- Available for the following ML scenarios: - - - AutoML Vision Classification - - AutoML Vision Object Detection - - AutoML Video Intelligence Classification - - AutoML Video Intelligence Object Tracking - - AutoML Natural Language Classification - - AutoML Natural Language Entity Extraction - - AutoML Natural Language Sentiment Analysis - - AutoML Tables - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["prediction_service_stub"].BatchPredict diff --git a/google/cloud/automl_v1/proto/__init__.py b/google/cloud/automl_v1/proto/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/automl_v1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1/proto/annotation_payload_pb2.py deleted file mode 100644 index 8cdfd04f..00000000 --- a/google/cloud/automl_v1/proto/annotation_payload_pb2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1/proto/annotation_payload.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_classification__pb2, -) -from google.cloud.automl_v1.proto import ( - detection_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_detection__pb2, -) -from google.cloud.automl_v1.proto import ( - text_extraction_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__extraction__pb2, -) -from google.cloud.automl_v1.proto import ( - text_sentiment_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__sentiment__pb2, -) -from google.cloud.automl_v1.proto import ( - translation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_translation__pb2, -) -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1/proto/annotation_payload.proto", - package="google.cloud.automl.v1", - syntax="proto3", - serialized_options=b"\n\032com.google.cloud.automl.v1P\001Z - AutoML Vision - Classification:
See - `Preparing your training data - `__ for more - information. CSV file(s) with each line in format: :: - ML_USE,GCS_FILE_PATH,LABEL,LABEL,... - ``ML_USE`` - Identifies the - data set that the current row (file) applies to. This value can be - one of the following: - ``TRAIN`` - Rows in this file are used to - train the model. - ``TEST`` - Rows in this file are used to test - the model during training. - ``UNASSIGNED`` - Rows in this - file are not categorized. They are automatically divided into - train and test data: 80% for training and 20% for testing. - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of - up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, - .BMP, .TIFF, .ICO. - ``LABEL`` - A label that identifies the object - in the image. For the ``MULTICLASS`` classification type, at most one - ``LABEL`` is allowed per image. If an image has not yet been labeled, - then it should be mentioned just once with no ``LABEL``. Some sample - rows: :: TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg
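A short, hedged sketch of preparing such a classification CSV and importing it into an existing dataset with the legacy client shown in this diff (all identifiers and bucket paths are placeholders):

```py
import csv

from google.cloud import automl_v1

# Write the ML_USE,GCS_FILE_PATH,LABEL rows described above.
with open("labels.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["TRAIN", "gs://folder/image1.jpg", "daisy"])
    writer.writerow(["UNASSIGNED", "gs://folder/image4.jpg"])

# After uploading labels.csv to GCS, import it into the dataset.
client = automl_v1.AutoMlClient()
dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")
input_config = {"gcs_source": {"input_uris": ["gs://folder/labels.csv"]}}
client.import_data(dataset_name, input_config).result()  # long-running operation
```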
- Object Detection:
See `Preparing your training data - `__ for more information. CSV file(s) with - each line in format: :: - ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) - ``ML_USE`` - - Identifies the data set that the current row (file) applies to. - This value can be one of the following: - ``TRAIN`` - Rows in - this file are used to train the model. - ``TEST`` - Rows in this - file are used to test the model during training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - automatically divided into train and test data: 80% for training - and 20% for testing. - ``GCS_FILE_PATH`` - The Google Cloud Storage - location of an image of up to 30MB in size. Supported extensions: - .JPEG, .GIF, .PNG. Each image is assumed to be exhaustively - labeled. - ``LABEL`` - A label that identifies the object in the - image specified by the ``BOUNDING_BOX``. - ``BOUNDING_BOX`` - The - vertices of an object in the example image. The minimum allowed - ``BOUNDING_BOX`` edge length is 0.01, and no more than 500 - ``BOUNDING_BOX`` instances per image are allowed (one - ``BOUNDING_BOX`` per line). If an image has none of the looked-for objects, - it should be mentioned just once with no LABEL and ",,,,,,," - in place of the ``BOUNDING_BOX``. **Four sample rows:** :: - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,,
- AutoML Video Intelligence - Classification:
See `Preparing your - training data `__ for more information. CSV - file(s) with each line in format: :: ML_USE,GCS_FILE_PATH For - ``ML_USE``, do not use ``VALIDATE``. ``GCS_FILE_PATH`` is the path to - another .csv file that describes training examples for a given - ``ML_USE``, using the following row format: :: - GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) Here - ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. - Any segment of a video which has one or more labels on it is - considered a hard negative for all other labels. Any segment with no - labels on it is considered to be unknown. If a whole video is unknown, - then it should be mentioned just once with ",," in place of ``LABEL, - TIME_SEGMENT_START,TIME_SEGMENT_END``. Sample top level CSV file: :: - TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for - a particular ML_USE: :: gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,,
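A hedged sketch of composing the two-level CSV layout described above with the standard library (file and bucket names are hypothetical):

```py
import csv

# Top-level index file: ML_USE,GCS_FILE_PATH
with open("videos_index.csv", "w", newline="") as f:
    csv.writer(f).writerows([
        ["TRAIN", "gs://folder/train_videos.csv"],
        ["TEST", "gs://folder/test_videos.csv"],
    ])

# Per-ML_USE file: GCS_FILE_PATH,LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END
with open("train_videos.csv", "w", newline="") as f:
    csv.writer(f).writerows([
        ["gs://folder/video1.avi", "car", "120", "180.000021"],
        ["gs://folder/vid3.avi", "", "", ""],  # a wholly unknown video
    ])
```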
- Object Tracking:
See `Preparing your - training data `__ for more information. CSV file(s) with each - line in format: :: ML_USE,GCS_FILE_PATH For ``ML_USE``, do not - use ``VALIDATE``. ``GCS_FILE_PATH`` is the path to another .csv file - that describes training examples for a given ``ML_USE``, using the - following row format: :: - GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or :: - GCS_FILE_PATH,,,,,,,,,, Here ``GCS_FILE_PATH`` leads to a video of up - to 50GB in size and up to 3h duration. Supported extensions: .MOV, - .MPEG4, .MP4, .AVI. Providing ``INSTANCE_ID``\ s can help to obtain a - better model. When a specific labeled entity leaves the video frame - and shows up again later, it is not required, albeit preferable, that - the same ``INSTANCE_ID`` be given to it. ``TIMESTAMP`` must be within - the length of the video, and the ``BOUNDING_BOX`` is assumed to be drawn - on the video frame closest to the ``TIMESTAMP``. Any frame mentioned by - a ``TIMESTAMP`` is expected to be exhaustively labeled, and no - more than 500 ``BOUNDING_BOX`` instances per frame are allowed. If a whole - video is unknown, then it should be mentioned just once with - ",,,,,,,,,," in place of ``LABEL, - [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. Sample top level CSV file: - :: TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV - file for a particular ML_USE: :: - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,,
- AutoML Natural Language - Entity Extraction:
See `Preparing your training data - `__ for more - information. One or more CSV file(s) with each line in the following - format: :: ML_USE,GCS_FILE_PATH - ``ML_USE`` - Identifies the - data set that the current row (file) applies to. This value can be - one of the following: - ``TRAIN`` - Rows in this file are used to - train the model. - ``TEST`` - Rows in this file are used to test - the model during training. - ``UNASSIGNED`` - Rows in this - file are not categorized. They are automatically divided into - train and test data: 80% for training and 20% for testing. - - ``GCS_FILE_PATH`` - Identifies a JSON Lines (.JSONL) file stored in - Google Cloud Storage that contains in-line text or documents - for model training. After the training data set has been determined - from the ``TRAIN`` and ``UNASSIGNED`` CSV files, the training data is - divided into train and validation data sets: 70% for training and 30% - for validation. For example: :: TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl - **In-line JSONL files** In-line .JSONL files contain, per line, a - JSON document that wraps a - [``text_snippet``][google.cloud.automl.v1.TextSnippet] field followed - by one or more - [``annotations``][google.cloud.automl.v1.AnnotationPayload] fields, - which have ``display_name`` and ``text_extraction`` fields to describe - the entity from the text snippet. Multiple JSON documents can be - separated using line breaks (``\\n``). The supplied text must - be annotated exhaustively. For example, if you include the text - "horse", but do not label it as "animal", then "horse" is assumed to - not be an "animal". Any given text snippet content must have 30,000 - characters or less, and also be UTF-8 NFC encoded. ASCII is accepted - as it is UTF-8 NFC encoded. For example: :: { - "text_snippet": { "content": "dog car cat" }, - "annotations": [ { "display_name": "animal", - "text_extraction": { "text_segment": {"start_offset": 0, - "end_offset": 2} } }, { - "display_name": "vehicle", "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 6} } - }, { "display_name": "animal", - "text_extraction": { "text_segment": {"start_offset": 8, - "end_offset": 10} } } ] }\\n { - "text_snippet": { "content": "This dog is good." }, - "annotations": [ { "display_name": "animal", - "text_extraction": { "text_segment": {"start_offset": 5, - "end_offset": 7} } } ] } **JSONL files - that reference documents** .JSONL files contain, per line, a JSON - document that wraps an ``input_config`` that contains the path to a - source document. Multiple JSON documents can be separated using line - breaks (``\\n``). Supported document extensions: .PDF, .TIF, - .TIFF For example: :: { "document": { - "input_config": { "gcs_source": { "input_uris": [ - "gs://folder/document1.pdf" ] } } } }\\n { - "document": { "input_config": { "gcs_source": { - "input_uris": [ "gs://folder/document2.tif" ] } } - } } **In-line JSONL files with document layout information** - **Note:** You can only annotate documents using the UI. The format - described below applies to annotated documents exported using the UI - or ``exportData``. In-line .JSONL files for documents contain, per - line, a JSON document that wraps a ``document`` field that provides - the textual content of the document and the layout information.
For - example: :: { "document": { "document_text": { - "content": "dog car cat" } "layout": [ - { "text_segment": { - "start_offset": 0, "end_offset": 11, - }, "page_number": 1, - "bounding_poly": { "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, - ], }, "text_segment_type": - TOKEN, } ], - "document_dimensions": { "width": 8.27, - "height": 11.69, "unit": INCH, } - "page_count": 3, }, "annotations": [ - { "display_name": "animal", - "text_extraction": { "text_segment": {"start_offset": - 0, "end_offset": 3} } }, { - "display_name": "vehicle", "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 7} } - }, { "display_name": "animal", - "text_extraction": { "text_segment": {"start_offset": - 8, "end_offset": 11} } }, ], - .. raw:: html
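A hedged sketch of emitting one in-line JSONL training line in the shape shown above, using only the standard library:

```py
import json

line = {
    "text_snippet": {"content": "dog car cat"},
    "annotations": [
        {"display_name": "animal",
         "text_extraction": {"text_segment": {"start_offset": 0, "end_offset": 2}}},
        {"display_name": "vehicle",
         "text_extraction": {"text_segment": {"start_offset": 4, "end_offset": 6}}},
    ],
}

with open("file1.jsonl", "w") as f:
    f.write(json.dumps(line) + "\n")  # one JSON document per line
```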
- Classification:
See `Preparing - your training data `__ for more information. One or more - CSV file(s) with each line in the following format: :: - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - ``ML_USE`` - - Identifies the data set that the current row (file) applies to. - This value can be one of the following: - ``TRAIN`` - Rows in - this file are used to train the model. - ``TEST`` - Rows in this - file are used to test the model during training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - automatically divided into train and test data: 80% for training - and 20% for testing. - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are - distinguished by a pattern. If the column content is a valid Google - Cloud Storage file path, that is, prefixed by "gs://", it is - treated as a ``GCS_FILE_PATH``. Otherwise, if the content is - enclosed in double quotes (""), it is treated as a - ``TEXT_SNIPPET``. For ``GCS_FILE_PATH``, the path must lead to a - file with supported extension and UTF-8 encoding, for - example, "gs://folder/content.txt". AutoML imports the file content - as a text snippet. For ``TEXT_SNIPPET``, AutoML imports the column - content excluding quotes. In both cases, the content must - be 10MB or less in size. For zip files, the size of each file - inside the zip must be 10MB or less. For the - ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed. The ``ML_USE`` and ``LABEL`` columns are optional. - Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP A maximum - of 100 unique labels are allowed per CSV row. Sample rows: :: - TRAIN,"They have bad food and very rude",RudeService,BadFood - gs://folder/content.txt,SlowService TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,BadFood
- Sentiment Analysis:
See `Preparing your - training data `__ for more information. CSV file(s) - with each line in format: :: ML_USE,(TEXT_SNIPPET | - GCS_FILE_PATH),SENTIMENT - ``ML_USE`` - Identifies the data set that - the current row (file) applies to. This value can be one of the - following: - ``TRAIN`` - Rows in this file are used to train the - model. - ``TEST`` - Rows in this file are used to test the model - during training. - ``UNASSIGNED`` - Rows in this file are - not categorized. They are automatically divided into train and - test data: 80% for training and 20% for testing. - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage file - path, that is, prefixed by "gs://", it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes (""), it is treated as a ``TEXT_SNIPPET``. For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, "gs://folder/content.txt". - AutoML imports the file content as a text snippet. For - ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. - In both cases, the content must be 128kB or less in size. For - zip files, the size of each file inside the zip must be 128kB or less. - The ``ML_USE`` and ``SENTIMENT`` columns are optional. - Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - - ``SENTIMENT`` - An integer between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). - Describes the ordinal of the sentiment - a higher value means a more - positive sentiment. All the values are completely relative, - i.e. neither 0 needs to mean a negative or neutral sentiment nor - sentiment_max needs to mean a positive one - it is just required that - 0 is the least positive sentiment in the data, and sentiment_max is - the most positive one. The SENTIMENT shouldn't be confused with - "score" or "magnitude" from the previous Natural Language Sentiment - Analysis API. All SENTIMENT values between 0 and sentiment_max must - be represented in the imported data. On prediction the same 0 to - sentiment_max range will be used. The difference between neighboring - sentiment values need not be uniform, e.g. 1 and 2 may be similar - whereas the difference between 2 and 3 may be large. Sample rows: :: - TRAIN,"@freewrytin this is way too good for your product",2 - gs://folder/content.txt,3 TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,2
- AutoML Tables:
See `Preparing - your training data `__ for more information. You can use either - [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or - [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. - All input is concatenated into a single - [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]. - **For gcs_source:** CSV file(s), where the first row of the first file is - the header, containing unique column names. If the first row of a - subsequent file is the same as the header, then it is also treated as - a header. All other rows contain values for the corresponding columns. - Each .CSV file by itself must be 10GB or smaller, and their total size - must be 100GB or smaller. First three sample rows of a CSV file: ::
-  "Id","First Name","Last Name","Dob","Addresses"
-  "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-  "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
**For bigquery_source:** A URI of a BigQuery table. The user - data size of the BigQuery table must be 100GB or smaller. An imported - table must have between 2 and 1,000 columns, inclusive, and between - 1000 and 100,000,000 rows, inclusive. At most 5 import data - operations can run in parallel.
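A hedged sketch of the ``bigquery_source`` alternative just described, including the ``schema_inference_version`` parameter that Tables imports require (covered below); the table URI and identifiers are placeholders:

```py
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
dataset_name = client.dataset_path("my-project", "us-central1", "my-tables-dataset")

input_config = {
    "bigquery_source": {"input_uri": "bq://my-project.my_dataset.my_table"},
    # Tables imports must supply this domain-specific parameter.
    "params": {"schema_inference_version": "1"},
}
client.import_data(dataset_name, input_config).result()
```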
**Input field definitions:** ``ML_USE`` ("TRAIN" \| - "VALIDATE" \| "TEST" \| "UNASSIGNED") Describes how the given - example (file) should be used for model training. "UNASSIGNED" can - be used when the user has no preference. ``GCS_FILE_PATH`` The path to - a file on Google Cloud Storage. For example, - "gs://folder/image1.png". ``LABEL`` A display name of an object on - an image, video etc., e.g. "dog". Must be up to 32 characters long - and can consist only of ASCII Latin letters A-Z and a-z, - underscores (_), and ASCII digits 0-9. For each label, an - AnnotationSpec is created whose display_name becomes the label; - AnnotationSpecs are given back in predictions. ``INSTANCE_ID`` A - positive integer that identifies a specific instance of a labeled - entity on an example. Used e.g. to track two cars on a video while - being able to tell apart which one is which. ``BOUNDING_BOX`` - (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A - rectangle parallel to the frame of the example (image, video). If 4 - vertices are given they are connected by edges in the order provided; - if 2 are given they are recognized as diagonally opposite vertices of - the rectangle. ``VERTEX`` (``COORDINATE,COORDINATE``) First - coordinate is horizontal (x), the second is vertical (y). - ``COORDINATE`` A float in 0 to 1 range, relative to total length of - image or video in given dimension. For fractions the leading - non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in the top left. - ``TIME_SEGMENT_START`` (``TIME_OFFSET``) Expresses a beginning, - inclusive, of a time segment within an example that has a time - dimension (e.g. video). ``TIME_SEGMENT_END`` (``TIME_OFFSET``) - Expresses an end, exclusive, of a time segment within an example - that has a time dimension (e.g. video). ``TIME_OFFSET`` A number of - seconds as measured from the start of an example (e.g. video). - Fractions are allowed, up to a microsecond precision. "inf" is - allowed, and it means the end of the example. ``TEXT_SNIPPET`` The - content of a text snippet, UTF-8 encoded, enclosed within double - quotes (""). ``DOCUMENT`` A field that provides the textual content - of a document and its layout information. **Errors:** If any of - the provided CSV files can't be parsed or if more than a certain percent - of CSV rows cannot be processed then the operation fails and nothing - is imported. Regardless of overall success or failure, the per-row - failures, up to a certain count cap, are listed in - Operation.metadata.partial_failures. - - Attributes: - source: - The source of the input. - gcs_source: - The Google Cloud Storage location for the input content. For - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], - ``gcs_source`` points to a CSV file with a structure described - in [InputConfig][google.cloud.automl.v1.InputConfig]. - params: - Additional domain-specific parameters describing the semantics - of the imported data; any string must be up to 25000 - characters long.
- AutoML Tables:
``schema_inference_version`` - (integer) This value must be supplied. The version of the - algorithm to use for the initial inference of the column - data types of the imported table. Allowed values: "1". - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.InputConfig) - }, -) -_sym_db.RegisterMessage(InputConfig) -_sym_db.RegisterMessage(InputConfig.ParamsEntry) - -BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( - "BatchPredictInputConfig", - (_message.Message,), - { - "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """Input configuration for BatchPredict Action. The format of input - depends on the ML problem of the model used for prediction. As input - source the [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] - is expected, unless specified otherwise. The formats are represented - in EBNF with commas being literal and with non-terminal symbols - defined near the end of this comment. The formats are:
- AutoML Vision - Classification:
One or more CSV files - where each line is a single column: :: GCS_FILE_PATH The Google - Cloud Storage location of an image of up to 30MB in size. Supported - extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the - batch predict output. Sample rows: :: gs://folder/image1.jpeg - gs://folder/image2.gif gs://folder/image3.png
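A hedged sketch of starting the batch prediction described above with the legacy client and blocking on the returned long-running operation (bucket paths and identifiers are placeholders):

```py
from google.cloud import automl_v1

client = automl_v1.PredictionServiceClient()
name = client.model_path("my-project", "us-central1", "my-model")

input_config = {"gcs_source": {"input_uris": ["gs://folder/batch.csv"]}}
output_config = {"gcs_destination": {"output_uri_prefix": "gs://folder/output/"}}

operation = client.batch_predict(name, input_config, output_config)
result = operation.result()  # BatchPredictResult once the operation is done
```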
- Object Detection:
One or more CSV files where - each line is a single column: :: GCS_FILE_PATH The Google Cloud - Storage location of an image of up to 30MB in size. Supported - extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the - batch predict output. Sample rows: :: gs://folder/image1.jpeg - gs://folder/image2.gif gs://folder/image3.png
- AutoML Video Intelligence - Classification:
One or more CSV files - where each line is a single column: :: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END ``GCS_FILE_PATH`` - is the Google Cloud Storage location of a video up to 50GB in size and - up to 3h in duration. Supported extensions: .MOV, .MPEG4, - .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be - within the length of the video, and the end time must be after the - start time. Sample rows: :: gs://folder/video1.mp4,10,40 - gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf
- Object Tracking:
One or more CSV files - where each line is a single column: :: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END ``GCS_FILE_PATH`` - is the Google Cloud Storage location of a video up to 50GB in size and - up to 3h in duration. Supported extensions: .MOV, .MPEG4, - .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be - within the length of the video, and the end time must be after the - start time. Sample rows: :: gs://folder/video1.mp4,10,40 - gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf
- AutoML Natural Language - Classification:
One or more - CSV files where each line is a single column: :: GCS_FILE_PATH - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF, .TIF, .TIFF Text files can be - no larger than 10MB in size. Sample rows: :: - gs://folder/text1.txt gs://folder/text2.pdf - gs://folder/text3.tif
- Sentiment Analysis:
One or more CSV files where each line is a single column: :: - GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location - of a text file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF - Text files can be no larger than 128kB in size. Sample rows: :: - gs://folder/text1.txt gs://folder/text2.pdf - gs://folder/text3.tif
- Entity Extraction:
One or more JSONL (JSON Lines) files that either provide inline - text or documents. You can only use one format, either inline text or - documents, for a single call to [AutoMl.BatchPredict]. Each JSONL - file contains, per line, a proto that wraps a temporary user-assigned - TextSnippet ID (string up to 2000 characters long) called "id", a - TextSnippet proto (in JSON representation) and zero or more - TextFeature protos. Any given text snippet content must have 30,000 - characters or less, and also be UTF-8 NFC encoded (ASCII already is). - The IDs provided should be unique. Each document JSONL file contains, - per line, a proto that wraps a Document proto with ``input_config`` - set. Each document cannot exceed 2MB in size. Supported document - extensions: .PDF, .TIF, .TIFF Each JSONL file must not exceed 100MB - in size, and no more than 20 JSONL files may be passed. Sample inline - JSONL file (Shown with artificial line breaks. Actual line breaks are - denoted by "``\\n``".): :: { "id": "my_first_id", - "text_snippet": { "content": "dog car cat"}, "text_features": [ - { "text_segment": {"start_offset": 4, "end_offset": 6}, - "structural_type": PARAGRAPH, "bounding_poly": { - "normalized_vertices": [ {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, ] }, } ], - }\\n { "id": "2", "text_snippet": { "content": - "Extended sample content", "mime_type": "text/plain" } - } Sample document JSONL file (Shown with artificial line breaks. - Actual line breaks are denoted by "``\\n``".): :: { - "document": { "input_config": { "gcs_source": { - "input_uris": [ "gs://folder/document1.pdf" ] } } - } }\\n { "document": { "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] - } } } }
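A hedged sketch of one inline batch-prediction JSONL line with the temporary user-assigned "id" described above:

```py
import json

line = {
    "id": "my_first_id",
    "text_snippet": {"content": "dog car cat"},
}
print(json.dumps(line))  # one such JSON document per line, separated by "\n"
```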
- AutoML Tables:
See `Preparing your training data - `__ for - more information. You can use either - [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source] or - [bigquery_source][BatchPredictInputConfig.bigquery_source]. **For - gcs_source:** CSV file(s), each by itself 10GB or smaller and total - size must be 100GB or smaller, where the first file must have a header - containing column names. If the first row of a subsequent file is the - same as the header, then it is also treated as a header. All other - rows contain values for the corresponding columns. The column names - must contain the model's - [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] - [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] - (order doesn't matter). The columns corresponding to the model's input - feature column specs must contain values compatible with the column - spec's data types. Prediction on all the rows, i.e. the CSV lines, - will be attempted. Sample rows from a CSV file: ::
-  "First Name","Last Name","Dob","Addresses"
-  "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-  "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
**For bigquery_source:** The URI of a - BigQuery table. The user data size of the BigQuery table must be 100GB - or smaller. The column names must contain the model's - [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] - [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] - (order doesn't matter). The columns corresponding to the model's input - feature column specs must contain values compatible with the column - spec's data types. Prediction on all the rows of the table will be - attempted.
**Input field definitions:** ``GCS_FILE_PATH`` The path to a file - on Google Cloud Storage. For example, "gs://folder/video.avi". - ``TIME_SEGMENT_START`` (``TIME_OFFSET``) Expresses a beginning, - inclusive, of a time segment within an example that has a time - dimension (e.g. video). ``TIME_SEGMENT_END`` (``TIME_OFFSET``) - Expresses an end, exclusive, of a time segment within an example - that has a time dimension (e.g. video). ``TIME_OFFSET`` A number of - seconds as measured from the start of an example (e.g. video). - Fractions are allowed, up to a microsecond precision. "inf" is - allowed, and it means the end of the example. **Errors:** If any of - the provided CSV files can't be parsed or if more than a certain percent - of CSV rows cannot be processed then the operation fails and - prediction does not happen. Regardless of overall success or failure, - the per-row failures, up to a certain count cap, will be listed in - Operation.metadata.partial_failures. - - Attributes: - source: - The source of the input. - gcs_source: - Required. The Google Cloud Storage location for the input - content. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictInputConfig) - }, -) -_sym_db.RegisterMessage(BatchPredictInputConfig) - -DocumentInputConfig = _reflection.GeneratedProtocolMessageType( - "DocumentInputConfig", - (_message.Message,), - { - "DESCRIPTOR": _DOCUMENTINPUTCONFIG, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """Input configuration of a [Document][google.cloud.automl.v1.Document]. - - Attributes: - gcs_source: - The Google Cloud Storage location of the document file. Only a - single path should be given. Max supported size: 512MB. - Supported extensions: .PDF. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DocumentInputConfig) - }, -) -_sym_db.RegisterMessage(DocumentInputConfig) - -OutputConfig = _reflection.GeneratedProtocolMessageType( - "OutputConfig", - (_message.Message,), - { - "DESCRIPTOR": _OUTPUTCONFIG, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """\* For Translation: CSV file ``translation.csv``, with each line in - format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which - describes examples that have the given ML_USE, using the following row - format per line: TEXT_SNIPPET (in source language) ``\t`` TEXT_SNIPPET - (in target language) - For Tables: Output depends on - whether the dataset was imported from Google Cloud Storage or - BigQuery. Google Cloud Storage case: - [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] - must be set. Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,…, ``tables_N.csv``, each having the table's column - names as a header line, while all other lines contain values for the - header columns. BigQuery case: - [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given - project a new dataset will be created with name - ``export_data_<automl-dataset-display-name>_<timestamp-of-export-call>``, - where <automl-dataset-display-name> will be made BigQuery-dataset-name - compatible (e.g. most special characters will become underscores), and - timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" - format. In that dataset a new table called ``primary_table`` will be - created, and filled with precisely the same data as was obtained on - import. - - Attributes: - destination: - The destination of the output. - gcs_destination: - Required.
The Google Cloud Storage location where the output - is to be written to. For Image Object Detection, Text - Extraction, Video Classification and Tables, in the given - directory a new directory will be created with name: - export_data-<automl-dataset-display-name>-<timestamp-of-export-call>, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. All export output will be written into that - directory. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.OutputConfig) - }, -) -_sym_db.RegisterMessage(OutputConfig) - -BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType( - "BatchPredictOutputConfig", - (_message.Message,), - { - "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """Output configuration for BatchPredict Action. As destination the [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination] must be set unless specified otherwise for a domain. If - gcs_destination is set then in the given directory a new directory is - created. Its name will be “prediction-<model-display-name>-<timestamp-of-prediction-call>”, where timestamp is in - YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depend on the - ML problem the predictions are made for. - For Image Classification: - In the created directory files ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. A single image - will be listed only once with all its annotations, and its - annotations will never be split across files. Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps the - image’s “ID” : “<id_value>” followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image failed - (partially or completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same “ID” : “<id_value>” but - here followed by exactly one ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ - containing only ``code`` and ``message``\ fields. - For Image Object - Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,…,\ - ``image_object_detection_N.jsonl`` will be created, where N may be - 1, and depends on the total number of the successfully predicted - images and annotations. Each .JSONL file will contain, per line, a - JSON representation of a proto that wraps the image’s “ID” : “<id_value>” - followed by a list of zero or more AnnotationPayload protos (called - annotations), which have image_object_detection detail populated. A - single image will be listed only once with all its annotations, and - its annotations will never be split across files. If prediction for - any image failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - the same “ID” : “<id_value>” but here followed by exactly one - ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ containing only ``code`` and - ``message``\ fields. \* For Video Classification: In the created - directory a video_classification.csv file, and a .JSON file per each - video classification requested in the input (i.e. 
each line in given - CSV(s)), will be created. :: The format of - video_classification.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS where: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the - prediction input lines (i.e. video_classification.csv has precisely - the same number of lines as the prediction input had.) JSON_FILE_NAME - = Name of .JSON file in the output directory, which contains - prediction responses for the video time segment. STATUS = “OK” if - prediction completed successfully, or an error code with message - otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. :: Each .JSON file, assuming STATUS is - "OK", will contain a list of AnnotationPayload protos in JSON - format, which are the predictions for the video time segment - the file is assigned to in the video_classification.csv. All - AnnotationPayload protos will have video_classification field - set, and will be sorted by video_classification.type field - (note that the returned types are governed by - `classification_types` parameter in - [PredictService.BatchPredictRequest.params][]). - For Video Object - Tracking: In the created directory a video_object_tracking.csv file - will be created, and multiple files video_object_tracking_1.json, - video_object_tracking_2.json,…, video_object_tracking_N.json, - where N is the number of requests in the input (i.e. the number of - lines in given CSV(s)). :: The format of - video_object_tracking.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS where: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the - prediction input lines (i.e. video_object_tracking.csv has precisely - the same number of lines as the prediction input had.) JSON_FILE_NAME - = Name of .JSON file in the output directory, which contains - prediction responses for the video time segment. STATUS = “OK” if - prediction completed successfully, or an error code with message - otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. :: Each .JSON file, assuming STATUS is - "OK", will contain a list of AnnotationPayload protos in JSON - format, which are the predictions for each frame of the video - time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - For Text Classification: In the - created directory files ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - inputs and annotations found. :: Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - input text file (or document) in the text snippet (or document) - proto and a list of zero or more AnnotationPayload protos - (called annotations), which have classification detail - populated. A single text file (or document) will be listed only - once with all its annotations, and its annotations will never be - split across files. If prediction for any input file (or - document) failed (partially or completely), then additional - `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` files - will be created (N depends on total number of failed - predictions). 
These files will have a JSON representation of a - proto that wraps the input file followed by exactly one - ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ containing only ``code`` and - ``message``. - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, ``text_sentiment_2.jsonl``,…,\ - ``text_sentiment_N.jsonl`` will be created, where N may be 1, and - depends on the total number of inputs and annotations found. :: - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text file (or document) in the text - snippet (or document) proto and a list of zero or more - AnnotationPayload protos (called annotations), which have - text_sentiment detail populated. A single text file (or - document) will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for - any input file (or document) failed (partially or completely), - then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps the input file followed by exactly one - ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ containing only ``code`` and - ``message``. - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, ``text_extraction_2.jsonl``,…,\ - ``text_extraction_N.jsonl`` will be created, where N may be 1, and - depends on the total number of inputs and annotations found. The - contents of these .JSONL file(s) depend on whether the input used - inline text, or documents. If input was inline, then each .JSONL - file will contain, per line, a JSON representation of a proto that - wraps the text snippet’s “id” given in the request (if specified), followed - by the input text snippet, and a list of zero or more AnnotationPayload - protos (called annotations), which have text_extraction detail - populated. A single text snippet will be listed only once with all - its annotations, and its annotations will never be split across - files. If input used documents, then each .JSONL file will contain, - per line, a JSON representation of a proto that wraps the document - proto given in the request, followed by its OCR-ed representation in - the form of a text snippet, finally followed by a list of zero or - more AnnotationPayload protos (called annotations), which have - text_extraction detail populated and refer, via their indices, to - the OCR-ed text snippet. A single document (and its text snippet) - will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for any - text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - either the “id” : “<id_value>” (in case of inline) or the document proto (in - case of document) but here followed by exactly one - ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ containing only ``code`` and - ``message``. - For Tables: Output depends on whether [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] or [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] is set (either is allowed). 
- Google Cloud Storage case: In the created directory files - ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` will be - created, where N may be 1, and depends on the total number of the - successfully predicted rows. For all CLASSIFICATION [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns’ - [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] - given on input followed by M target column names in the format of "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>_<target value>_score" where M is the number of distinct - target values, i.e. number of distinct values in the target column of - the table used to train the model. Subsequent lines will contain the - respective values of successfully predicted rows, with the last, - i.e. the target, columns having the corresponding prediction - [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For - REGRESSION and FORECASTING [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: Each .csv file will - contain a header, listing all columns’ - [display_name-s][google.cloud.automl.v1p1beta.display_name] given on - input followed by the predicted target column with name in the format - of "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>". - Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, column having the - predicted target value. If prediction for any rows failed, then an - additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will - be created (N depends on total number of failed rows). These files - will have an analogous format to ``tables_*.csv``, but always with a - single target column having ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ - represented as a JSON string, and containing only ``code`` and - ``message``. BigQuery case: [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] pointing to a BigQuery - project must be set. In the given project a new dataset will be - created with name ``prediction_<model-display-name>_<timestamp-of-prediction-call>`` where <model-display-name> will be made BigQuery-dataset-name compatible - (e.g. most special characters will become underscores), and timestamp - will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the - dataset two tables will be created, ``predictions``, and ``errors``. - The ``predictions`` table’s column names will be the input columns’ - [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] - followed by the target column with name in the format of "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>". - The input feature columns will contain the respective values of - successfully predicted rows, with the target column having an ARRAY of - [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], - represented as STRUCT-s, containing - [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. 
The - ``errors`` table contains rows for which the prediction has failed; it - has analogous input columns, while the target column name is in the - format of "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", and as a value has - ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__ represented as a STRUCT, and - containing only ``code`` and ``message``. - - Attributes: - destination: - The destination of the output. - gcs_destination: - Required. The Google Cloud Storage location of the directory - where the output is to be written to. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictOutputConfig) - }, -) -_sym_db.RegisterMessage(BatchPredictOutputConfig) - -ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType( - "ModelExportOutputConfig", - (_message.Message,), - { - "ParamsEntry": _reflection.GeneratedProtocolMessageType( - "ParamsEntry", - (_message.Message,), - { - "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, - "__module__": "google.cloud.automl_v1.proto.io_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig.ParamsEntry) - }, - ), - "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """Output configuration for ModelExport Action. - - Attributes: - destination: - The destination of the output. - gcs_destination: - Required. The Google Cloud Storage location where the model is - to be written to. This location may only be set for the - following model formats: “tflite”, “edgetpu_tflite”, - “tf_saved_model”, “tf_js”, “core_ml”. Under the directory - given as the destination a new one with name “model-export-<model-display-name>-<timestamp-of-export-call>”, - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format, will be created. Inside it, the model and any of its - supporting files will be written. - model_format: - The format in which the model must be exported. The available, - and default, formats depend on the problem and model type (if a - given problem and type combination doesn’t have a format - listed, its models are not exportable): - For Image - Classification mobile-low-latency-1, mobile-versatile-1, - mobile-high-accuracy-1: “tflite” (default), “edgetpu_tflite”, - “tf_saved_model”, “tf_js”, “docker”. - For Image - Classification mobile-core-ml-low-latency-1, mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1: “core_ml” - (default). - For Image Object Detection mobile-low-latency-1, mobile-versatile-1, mobile-high-accuracy-1: - “tflite”, “tf_saved_model”, “tf_js”. Formats description: - - tflite - Used for Android mobile devices. - edgetpu_tflite - - Used for `Edge TPU <https://cloud.google.com/edge-tpu/>`__ devices. - tf_saved_model - A TensorFlow model in - SavedModel format. - tf_js - A `TensorFlow.js - <https://www.tensorflow.org/js>`__ model that can be used - in the browser and in Node.js using JavaScript. - docker - - Used for Docker containers. Use the params field to - customize the container. The container is verified to work - correctly on the Ubuntu 16.04 operating system. See more at - [containers quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart) \* core_ml - Used for iOS mobile devices. - params: - Additional model-type and format specific parameters - describing the requirements for the model files to be exported; - any string must be up to 25000 characters long. - For - ``docker`` format: ``cpu_architecture`` - (string) “x86_64” - (default). 
``gpu_architecture`` - (string) “none” (default), - “nvidia”. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig) - }, -) -_sym_db.RegisterMessage(ModelExportOutputConfig) -_sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry) - -GcsSource = _reflection.GeneratedProtocolMessageType( - "GcsSource", - (_message.Message,), - { - "DESCRIPTOR": _GCSSOURCE, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """The Google Cloud Storage location for the input content. - - Attributes: - input_uris: - Required. Google Cloud Storage URIs to input files, up to 2000 - characters long. Accepted forms: \* Full object path, - e.g. gs://bucket/directory/object.csv - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GcsSource) - }, -) -_sym_db.RegisterMessage(GcsSource) - -GcsDestination = _reflection.GeneratedProtocolMessageType( - "GcsDestination", - (_message.Message,), - { - "DESCRIPTOR": _GCSDESTINATION, - "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """The Google Cloud Storage location where the output is to be written - to. - - Attributes: - output_uri_prefix: - Required. Google Cloud Storage URI to output directory, up to - 2000 characters long. Accepted forms: \* Prefix path: - gs://bucket/directory The requesting user must have write - permission to the bucket. The directory is created if it - doesn’t exist. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GcsDestination) - }, -) -_sym_db.RegisterMessage(GcsDestination) - - -DESCRIPTOR._options = None -_INPUTCONFIG_PARAMSENTRY._options = None -_BATCHPREDICTINPUTCONFIG.fields_by_name["gcs_source"]._options = None -_OUTPUTCONFIG.fields_by_name["gcs_destination"]._options = None -_BATCHPREDICTOUTPUTCONFIG.fields_by_name["gcs_destination"]._options = None -_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY._options = None -_MODELEXPORTOUTPUTCONFIG.fields_by_name["gcs_destination"]._options = None -_GCSSOURCE.fields_by_name["input_uris"]._options = None -_GCSDESTINATION.fields_by_name["output_uri_prefix"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1/proto/io_pb2_grpc.py b/google/cloud/automl_v1/proto/io_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1/proto/io_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1/proto/model_evaluation_pb2.py deleted file mode 100644 index 4975d076..00000000 --- a/google/cloud/automl_v1/proto/model_evaluation_pb2.py +++ /dev/null @@ -1,404 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/automl_v1/proto/model_evaluation.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_classification__pb2, -) -from google.cloud.automl_v1.proto import ( - detection_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_detection__pb2, -) -from google.cloud.automl_v1.proto import ( - text_extraction_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__extraction__pb2, -) -from google.cloud.automl_v1.proto import ( - text_sentiment_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__sentiment__pb2, -) -from google.cloud.automl_v1.proto import ( - translation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_translation__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1/proto/model_evaluation.proto", - package="google.cloud.automl.v1", - syntax="proto3", - serialized_options=b"\n\032com.google.cloud.automl.v1P\001Z\n\x0cinput_config\x18\x03 \x01(\x0b\x32#.google.cloud.automl.v1.InputConfigB\x03\xe0\x41\x02"\x8a\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12@\n\routput_config\x18\x03 \x01(\x0b\x32$.google.cloud.automl.v1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"\x82\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"[\n\x12ListModelsResponse\x12,\n\x05model\x18\x01 \x03(\x0b\x32\x1d.google.cloud.automl.v1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"}\n\x12UpdateModelRequest\x12\x31\n\x05model\x18\x01 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"\xe3\x02\n\x12\x44\x65ployModelRequest\x12\x7f\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32\x43.google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12|\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32\x42.google.cloud.automl.v1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 
\x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x94\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12K\n\routput_config\x18\x03 \x01(\x0b\x32/.google.cloud.automl.v1.ModelExportOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x8e\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x13\n\x06\x66ilter\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"z\n\x1cListModelEvaluationsResponse\x12\x41\n\x10model_evaluation\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xe8\x1b\n\x06\x41utoMl\x12\xcb\x01\n\rCreateDataset\x12,.google.cloud.automl.v1.CreateDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x37",/v1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\xca\x41\x1c\n\x07\x44\x61taset\x12\x11OperationMetadata\x12\x95\x01\n\nGetDataset\x12).google.cloud.automl.v1.GetDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset";\x82\xd3\xe4\x93\x02.\x12,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xa8\x01\n\x0cListDatasets\x12+.google.cloud.automl.v1.ListDatasetsRequest\x1a,.google.cloud.automl.v1.ListDatasetsResponse"=\x82\xd3\xe4\x93\x02.\x12,/v1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbb\x01\n\rUpdateDataset\x12,.google.cloud.automl.v1.UpdateDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset"[\x82\xd3\xe4\x93\x02?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x13\x64\x61taset,update_mask\x12\xc6\x01\n\rDeleteDataset\x12,.google.cloud.automl.v1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"h\x82\xd3\xe4\x93\x02.*,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdc\x01\n\nImportData\x12).google.cloud.automl.v1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\x01*\xda\x41\x11name,input_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdd\x01\n\nExportData\x12).google.cloud.automl.v1.ExportDataRequest\x1a\x1d.google.longrunning.Operation"\x84\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbc\x01\n\x11GetAnnotationSpec\x12\x30.google.cloud.automl.v1.GetAnnotationSpecRequest\x1a&.google.cloud.automl.v1.AnnotationSpec"M\x82\xd3\xe4\x93\x02@\x12>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\xda\x41\x04name\x12\xbf\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"e\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x8d\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"9\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\x
da\x41\x04name\x12\xa0\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse";\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xc0\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"f\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xad\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"S\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\xda\x41\x11model,update_mask\x12\xca\x01\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"p\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd0\x01\n\rUndeployModel\x12,.google.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"r\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd8\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"~\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbe\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"L\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xd8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"U\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\rparent,filter\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z The dataset has - translation_dataset_metadata. - page_size: - Requested page size. Server may return fewer results than - requested. If unspecified, server will pick a default size. - page_token: - A token identifying a page of results for the server to return - Typically obtained via [ListDatasetsResponse.next_page_token][ - google.cloud.automl.v1.ListDatasetsResponse.next_page_token] - of the previous [AutoMl.ListDatasets][google.cloud.automl.v1.A - utoMl.ListDatasets] call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListDatasetsRequest) - }, -) -_sym_db.RegisterMessage(ListDatasetsRequest) - -ListDatasetsResponse = _reflection.GeneratedProtocolMessageType( - "ListDatasetsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATASETSRESPONSE, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Attributes: - datasets: - The datasets read. - next_page_token: - A token to retrieve next page of results. Pass to [ListDataset - sRequest.page_token][google.cloud.automl.v1.ListDatasetsReques - t.page_token] to obtain that page. 
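- For illustration, a hedged sketch of the paging flow described above,
- on the post-2.0 client surface (project and location values are
- placeholders); the returned pager follows ``next_page_token`` /
- ``page_token`` across calls for you: ::
-
-     from google.cloud import automl
-
-     client = automl.AutoMlClient()
-     parent = "projects/YOUR_PROJECT_ID/locations/us-central1"
-     # Iterating the pager transparently requests subsequent pages.
-     for dataset in client.list_datasets(parent=parent):
-         print(dataset.name)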
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListDatasetsResponse) - }, -) -_sym_db.RegisterMessage(ListDatasetsResponse) - -UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType( - "UpdateDatasetRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEDATASETREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - - Attributes: - dataset: - Required. The dataset which replaces the resource on the - server. - update_mask: - Required. The update mask applies to the resource. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UpdateDatasetRequest) - }, -) -_sym_db.RegisterMessage(UpdateDatasetRequest) - -DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType( - "DeleteDatasetRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEDATASETREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - - Attributes: - name: - Required. The resource name of the dataset to delete. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteDatasetRequest) - }, -) -_sym_db.RegisterMessage(DeleteDatasetRequest) - -ImportDataRequest = _reflection.GeneratedProtocolMessageType( - "ImportDataRequest", - (_message.Message,), - { - "DESCRIPTOR": _IMPORTDATAREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - - Attributes: - name: - Required. Dataset name. Dataset must already exist. All - imported annotations and examples will be added. - input_config: - Required. The desired input location and its domain specific - semantics, if any. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ImportDataRequest) - }, -) -_sym_db.RegisterMessage(ImportDataRequest) - -ExportDataRequest = _reflection.GeneratedProtocolMessageType( - "ExportDataRequest", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTDATAREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - - Attributes: - name: - Required. The resource name of the dataset. - output_config: - Required. The desired output location. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportDataRequest) - }, -) -_sym_db.RegisterMessage(ExportDataRequest) - -GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType( - "GetAnnotationSpecRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1. - AutoMl.GetAnnotationSpec]. - - Attributes: - name: - Required. The resource name of the annotation spec to - retrieve. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetAnnotationSpecRequest) - }, -) -_sym_db.RegisterMessage(GetAnnotationSpecRequest) - -CreateModelRequest = _reflection.GeneratedProtocolMessageType( - "CreateModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. 
- - Attributes: - parent: - Required. Resource name of the parent project where the model - is being created. - model: - Required. The model to create. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.CreateModelRequest) - }, -) -_sym_db.RegisterMessage(CreateModelRequest) - -GetModelRequest = _reflection.GeneratedProtocolMessageType( - "GetModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - - Attributes: - name: - Required. Resource name of the model. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetModelRequest) - }, -) -_sym_db.RegisterMessage(GetModelRequest) - -ListModelsRequest = _reflection.GeneratedProtocolMessageType( - "ListModelsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELSREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Attributes: - parent: - Required. Resource name of the project, from which to list the - models. - filter: - An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case - (e.g. ``video_classification_model_metadata:*``). - ``dataset_id`` - - for = or !=. Some examples of using the filter are: - - ``image_classification_model_metadata:*`` –> The model has - image_classification_model_metadata. - ``dataset_id=5`` –> - The model was created from a dataset with ID 5. - page_size: - Requested page size. - page_token: - A token identifying a page of results for the server to return. - Typically obtained via [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] of - the previous - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] - call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelsRequest) - }, -) -_sym_db.RegisterMessage(ListModelsRequest) - -ListModelsResponse = _reflection.GeneratedProtocolMessageType( - "ListModelsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELSRESPONSE, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Attributes: - model: - List of models in the requested page. - next_page_token: - A token to retrieve the next page of results. Pass to [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] to obtain that page. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelsResponse) - }, -) -_sym_db.RegisterMessage(ListModelsResponse) - -DeleteModelRequest = _reflection.GeneratedProtocolMessageType( - "DeleteModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - - Attributes: - name: - Required. Resource name of the model being deleted. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteModelRequest) - }, -) -_sym_db.RegisterMessage(DeleteModelRequest) - -UpdateModelRequest = _reflection.GeneratedProtocolMessageType( - "UpdateModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - - Attributes: - model: - Required. The model which replaces the resource on the server. - update_mask: - Required. The update mask applies to the resource. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UpdateModelRequest) - }, -) -_sym_db.RegisterMessage(UpdateModelRequest) - -DeployModelRequest = _reflection.GeneratedProtocolMessageType( - "DeployModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _DEPLOYMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - - Attributes: - model_deployment_metadata: - The per-domain specific deployment parameters. - image_object_detection_model_deployment_metadata: - Model deployment metadata specific to Image Object Detection. - image_classification_model_deployment_metadata: - Model deployment metadata specific to Image Classification. - name: - Required. Resource name of the model to deploy. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeployModelRequest) - }, -) -_sym_db.RegisterMessage(DeployModelRequest) - -UndeployModelRequest = _reflection.GeneratedProtocolMessageType( - "UndeployModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _UNDEPLOYMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - - Attributes: - name: - Required. Resource name of the model to undeploy. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UndeployModelRequest) - }, -) -_sym_db.RegisterMessage(UndeployModelRequest) - -ExportModelRequest = _reflection.GeneratedProtocolMessageType( - "ExportModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTMODELREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an error code will - be returned. - - Attributes: - name: - Required. The resource name of the model to export. - output_config: - Required. The desired output location and configuration. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportModelRequest) - }, -) -_sym_db.RegisterMessage(ExportModelRequest) - -GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType( - "GetModelEvaluationRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 - .AutoMl.GetModelEvaluation]. - - Attributes: - name: - Required. Resource name for the model evaluation. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetModelEvaluationRequest) - }, -) -_sym_db.RegisterMessage(GetModelEvaluationRequest) - -ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType( - "ListModelEvaluationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. - v1.AutoMl.ListModelEvaluations]. - - Attributes: - parent: - Required. Resource name of the model to list the model - evaluations for. If modelId is set as “-”, this will list - model evaluations from across all models of the parent - location. - filter: - Required. An expression for filtering the results of the - request. - ``annotation_spec_id`` - for =, != or existence. - See example below for the last. Some examples of using the - filter are: - ``annotation_spec_id!=4`` –> The model - evaluation was done for annotation spec with ID different - than 4. - ``NOT annotation_spec_id:*`` –> The model - evaluation was done for aggregate of all annotation specs. - page_size: - Requested page size. - page_token: - A token identifying a page of results for the server to - return. Typically obtained via [ListModelEvaluationsResponse.n - ext_page_token][google.cloud.automl.v1.ListModelEvaluationsRes - ponse.next_page_token] of the previous [AutoMl.ListModelEvalua - tions][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelEvaluationsRequest) - }, -) -_sym_db.RegisterMessage(ListModelEvaluationsRequest) - -ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType( - "ListModelEvaluationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, - "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl - .v1.AutoMl.ListModelEvaluations]. - - Attributes: - model_evaluation: - List of model evaluations in the requested page. - next_page_token: - A token to retrieve next page of results. Pass to the [ListMod - elEvaluationsRequest.page_token][google.cloud.automl.v1.ListMo - delEvaluationsRequest.page_token] field of a new [AutoMl.ListM - odelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvalua - tions] request to obtain that page. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelEvaluationsResponse) - }, -) -_sym_db.RegisterMessage(ListModelEvaluationsResponse) - - -DESCRIPTOR._options = None -_CREATEDATASETREQUEST.fields_by_name["parent"]._options = None -_CREATEDATASETREQUEST.fields_by_name["dataset"]._options = None -_GETDATASETREQUEST.fields_by_name["name"]._options = None -_LISTDATASETSREQUEST.fields_by_name["parent"]._options = None -_UPDATEDATASETREQUEST.fields_by_name["dataset"]._options = None -_UPDATEDATASETREQUEST.fields_by_name["update_mask"]._options = None -_DELETEDATASETREQUEST.fields_by_name["name"]._options = None -_IMPORTDATAREQUEST.fields_by_name["name"]._options = None -_IMPORTDATAREQUEST.fields_by_name["input_config"]._options = None -_EXPORTDATAREQUEST.fields_by_name["name"]._options = None -_EXPORTDATAREQUEST.fields_by_name["output_config"]._options = None -_GETANNOTATIONSPECREQUEST.fields_by_name["name"]._options = None -_CREATEMODELREQUEST.fields_by_name["parent"]._options = None -_CREATEMODELREQUEST.fields_by_name["model"]._options = None -_GETMODELREQUEST.fields_by_name["name"]._options = None -_LISTMODELSREQUEST.fields_by_name["parent"]._options = None -_DELETEMODELREQUEST.fields_by_name["name"]._options = None -_UPDATEMODELREQUEST.fields_by_name["model"]._options = None -_UPDATEMODELREQUEST.fields_by_name["update_mask"]._options = None -_DEPLOYMODELREQUEST.fields_by_name["name"]._options = None -_UNDEPLOYMODELREQUEST.fields_by_name["name"]._options = None -_EXPORTMODELREQUEST.fields_by_name["name"]._options = None -_EXPORTMODELREQUEST.fields_by_name["output_config"]._options = None -_GETMODELEVALUATIONREQUEST.fields_by_name["name"]._options = None -_LISTMODELEVALUATIONSREQUEST.fields_by_name["parent"]._options = None -_LISTMODELEVALUATIONSREQUEST.fields_by_name["filter"]._options = None - -_AUTOML = _descriptor.ServiceDescriptor( - name="AutoMl", - full_name="google.cloud.automl.v1.AutoMl", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=3236, - serialized_end=6796, - methods=[ - _descriptor.MethodDescriptor( - name="CreateDataset", - full_name="google.cloud.automl.v1.AutoMl.CreateDataset", - index=0, - containing_service=None, - input_type=_CREATEDATASETREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027",/v1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset\312A\034\n\007Dataset\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetDataset", - full_name="google.cloud.automl.v1.AutoMl.GetDataset", - index=1, - containing_service=None, - input_type=_GETDATASETREQUEST, - output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=b"\202\323\344\223\002.\022,/v1/{name=projects/*/locations/*/datasets/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListDatasets", - full_name="google.cloud.automl.v1.AutoMl.ListDatasets", - index=2, - containing_service=None, - input_type=_LISTDATASETSREQUEST, - output_type=_LISTDATASETSRESPONSE, - serialized_options=b"\202\323\344\223\002.\022,/v1/{parent=projects/*/locations/*}/datasets\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateDataset", - 
full_name="google.cloud.automl.v1.AutoMl.UpdateDataset", - index=3, - containing_service=None, - input_type=_UPDATEDATASETREQUEST, - output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=b"\202\323\344\223\002?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\023dataset,update_mask", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteDataset", - full_name="google.cloud.automl.v1.AutoMl.DeleteDataset", - index=4, - containing_service=None, - input_type=_DELETEDATASETREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002.*,/v1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ImportData", - full_name="google.cloud.automl.v1.AutoMl.ImportData", - index=5, - containing_service=None, - input_type=_IMPORTDATAREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\001*\332A\021name,input_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExportData", - full_name="google.cloud.automl.v1.AutoMl.ExportData", - index=6, - containing_service=None, - input_type=_EXPORTDATAREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetAnnotationSpec", - full_name="google.cloud.automl.v1.AutoMl.GetAnnotationSpec", - index=7, - containing_service=None, - input_type=_GETANNOTATIONSPECREQUEST, - output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2._ANNOTATIONSPEC, - serialized_options=b"\202\323\344\223\002@\022>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateModel", - full_name="google.cloud.automl.v1.AutoMl.CreateModel", - index=8, - containing_service=None, - input_type=_CREATEMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0023"*/v1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetModel", - full_name="google.cloud.automl.v1.AutoMl.GetModel", - index=9, - containing_service=None, - input_type=_GETMODELREQUEST, - output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, - serialized_options=b"\202\323\344\223\002,\022*/v1/{name=projects/*/locations/*/models/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListModels", - full_name="google.cloud.automl.v1.AutoMl.ListModels", - index=10, - containing_service=None, - input_type=_LISTMODELSREQUEST, - output_type=_LISTMODELSRESPONSE, - 
serialized_options=b"\202\323\344\223\002,\022*/v1/{parent=projects/*/locations/*}/models\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteModel", - full_name="google.cloud.automl.v1.AutoMl.DeleteModel", - index=11, - containing_service=None, - input_type=_DELETEMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002,**/v1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateModel", - full_name="google.cloud.automl.v1.AutoMl.UpdateModel", - index=12, - containing_service=None, - input_type=_UPDATEMODELREQUEST, - output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, - serialized_options=b"\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model\332A\021model,update_mask", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeployModel", - full_name="google.cloud.automl.v1.AutoMl.DeployModel", - index=13, - containing_service=None, - input_type=_DEPLOYMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UndeployModel", - full_name="google.cloud.automl.v1.AutoMl.UndeployModel", - index=14, - containing_service=None, - input_type=_UNDEPLOYMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExportModel", - full_name="google.cloud.automl.v1.AutoMl.ExportModel", - index=15, - containing_service=None, - input_type=_EXPORTMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetModelEvaluation", - full_name="google.cloud.automl.v1.AutoMl.GetModelEvaluation", - index=16, - containing_service=None, - input_type=_GETMODELEVALUATIONREQUEST, - output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, - serialized_options=b"\202\323\344\223\002?\022=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListModelEvaluations", - full_name="google.cloud.automl.v1.AutoMl.ListModelEvaluations", - index=17, - containing_service=None, - input_type=_LISTMODELEVALUATIONSREQUEST, - output_type=_LISTMODELEVALUATIONSRESPONSE, - serialized_options=b"\202\323\344\223\002?\022=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\rparent,filter", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_AUTOML) - -DESCRIPTOR.services_by_name["AutoMl"] 
= _AUTOML - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1/proto/service_pb2_grpc.py b/google/cloud/automl_v1/proto/service_pb2_grpc.py deleted file mode 100644 index 2f9a2837..00000000 --- a/google/cloud/automl_v1/proto/service_pb2_grpc.py +++ /dev/null @@ -1,424 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.automl_v1.proto import ( - annotation_spec_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2, -) -from google.cloud.automl_v1.proto import ( - dataset_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2, -) -from google.cloud.automl_v1.proto import ( - model_evaluation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2, -) -from google.cloud.automl_v1.proto import ( - model_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2, -) -from google.cloud.automl_v1.proto import ( - service_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -class AutoMlStub(object): - """AutoML Server API. - - The resource names are assigned by the server. - The server never reuses names that it has created after the resources with - those names are deleted. - - An ID of a resource is the last element of the item's resource name. For - `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, the - ID for the item is `{dataset_id}`. - - Currently the only supported `location_id` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
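- For illustration, a minimal sketch of wiring this (pre-2.0) stub up by
- hand; auth-token plumbing is omitted, and the endpoint below is the
- assumed public one: ::
-
-     import grpc
-
-     from google.cloud.automl_v1.proto import service_pb2_grpc
-
-     # TLS-only channel; real calls also need OAuth credentials attached.
-     channel = grpc.secure_channel(
-         "automl.googleapis.com:443", grpc.ssl_channel_credentials()
-     )
-     stub = service_pb2_grpc.AutoMlStub(channel)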
- """ - self.CreateDataset = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/CreateDataset", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateDatasetRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetDataset = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/GetDataset", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetDatasetRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.FromString, - ) - self.ListDatasets = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/ListDatasets", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsResponse.FromString, - ) - self.UpdateDataset = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/UpdateDataset", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateDatasetRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.FromString, - ) - self.DeleteDataset = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/DeleteDataset", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteDatasetRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ImportData = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/ImportData", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ImportDataRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ExportData = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/ExportData", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportDataRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetAnnotationSpec = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.FromString, - ) - self.CreateModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/CreateModel", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/GetModel", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.FromString, - ) - self.ListModels = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/ListModels", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsResponse.FromString, - ) - self.DeleteModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/DeleteModel", - 
request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UpdateModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/UpdateModel", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateModelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.FromString, - ) - self.DeployModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/DeployModel", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeployModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UndeployModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/UndeployModel", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UndeployModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ExportModel = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/ExportModel", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetModelEvaluation = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/GetModelEvaluation", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2.ModelEvaluation.FromString, - ) - self.ListModelEvaluations = channel.unary_unary( - "/google.cloud.automl.v1.AutoMl/ListModelEvaluations", - request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsResponse.FromString, - ) - - -class AutoMlServicer(object): - """AutoML Server API. - - The resource names are assigned by the server. - The server never reuses names that it has created after the resources with - those names are deleted. - - An ID of a resource is the last element of the item's resource name. For - `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then - the id for the item is `{dataset_id}`. - - Currently the only supported `location_id` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - def CreateDataset(self, request, context): - """Creates a dataset. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetDataset(self, request, context): - """Gets a dataset. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListDatasets(self, request, context): - """Lists datasets in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateDataset(self, request, context): - """Updates a dataset. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteDataset(self, request, context): - """Deletes a dataset and all of its contents. - Returns empty response in the - [response][google.longrunning.Operation.response] field when it completes, - and `delete_details` in the - [metadata][google.longrunning.Operation.metadata] field. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ImportData(self, request, context): - """Imports data into a dataset. - For Tables this method can only be called on an empty Dataset. - - For Tables: - * A - [schema_inference_version][google.cloud.automl.v1.InputConfig.params] - parameter must be explicitly set. - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExportData(self, request, context): - """Exports dataset's data to the provided output location. - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAnnotationSpec(self, request, context): - """Gets an annotation spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateModel(self, request, context): - """Creates a model. - Returns a Model in the [response][google.longrunning.Operation.response] - field when it completes. - When you create a model, several model evaluations are created for it: - a global evaluation, and one evaluation for each annotation spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetModel(self, request, context): - """Gets a model. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListModels(self, request, context): - """Lists models. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteModel(self, request, context): - """Deletes a model. - Returns `google.protobuf.Empty` in the - [response][google.longrunning.Operation.response] field when it completes, - and `delete_details` in the - [metadata][google.longrunning.Operation.metadata] field. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateModel(self, request, context): - """Updates a model. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeployModel(self, request, context): - """Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs - (as e.g. 
changing - - [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's availability. - - Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UndeployModel(self, request, context): - """Undeploys a model. If the model is not deployed this method has no effect. - - Only applicable for Text Classification, Image Object Detection and Tables; - all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExportModel(self, request, context): - """Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it has - an export format defined for it in - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetModelEvaluation(self, request, context): - """Gets a model evaluation. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListModelEvaluations(self, request, context): - """Lists model evaluations. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_AutoMlServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateDataset": grpc.unary_unary_rpc_method_handler( - servicer.CreateDataset, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateDatasetRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetDataset": grpc.unary_unary_rpc_method_handler( - servicer.GetDataset, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetDatasetRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString, - ), - "ListDatasets": grpc.unary_unary_rpc_method_handler( - servicer.ListDatasets, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsResponse.SerializeToString, - ), - "UpdateDataset": grpc.unary_unary_rpc_method_handler( - servicer.UpdateDataset, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateDatasetRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString, - ), - "DeleteDataset": grpc.unary_unary_rpc_method_handler( - servicer.DeleteDataset, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteDatasetRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ImportData": grpc.unary_unary_rpc_method_handler( - servicer.ImportData, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ImportDataRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ExportData": grpc.unary_unary_rpc_method_handler( - servicer.ExportData, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportDataRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetAnnotationSpec": grpc.unary_unary_rpc_method_handler( - servicer.GetAnnotationSpec, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.SerializeToString, - ), - "CreateModel": grpc.unary_unary_rpc_method_handler( - servicer.CreateModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetModel": grpc.unary_unary_rpc_method_handler( - servicer.GetModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.SerializeToString, - ), - "ListModels": grpc.unary_unary_rpc_method_handler( - servicer.ListModels, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsRequest.FromString, - 
response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsResponse.SerializeToString, - ), - "DeleteModel": grpc.unary_unary_rpc_method_handler( - servicer.DeleteModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UpdateModel": grpc.unary_unary_rpc_method_handler( - servicer.UpdateModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateModelRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.SerializeToString, - ), - "DeployModel": grpc.unary_unary_rpc_method_handler( - servicer.DeployModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeployModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UndeployModel": grpc.unary_unary_rpc_method_handler( - servicer.UndeployModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UndeployModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ExportModel": grpc.unary_unary_rpc_method_handler( - servicer.ExportModel, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetModelEvaluation": grpc.unary_unary_rpc_method_handler( - servicer.GetModelEvaluation, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2.ModelEvaluation.SerializeToString, - ), - "ListModelEvaluations": grpc.unary_unary_rpc_method_handler( - servicer.ListModelEvaluations, - request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.automl.v1.AutoMl", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py deleted file mode 100644 index 951204b3..00000000 --- a/google/cloud/automl_v1/proto/text_extraction_pb2.py +++ /dev/null @@ -1,354 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/automl_v1/proto/text_extraction.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1.proto import ( - text_segment_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_text__segment__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1/proto/text_extraction.proto", - package="google.cloud.automl.v1", - syntax="proto3", - serialized_options=b"\n\032com.google.cloud.automl.v1P\001Z None: + """Instantiate the auto ml client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoMlTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = AutoMlClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_dataset( + self, + request: service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a dataset. + + Args: + request (:class:`~.service.CreateDatasetRequest`): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the + project to create the dataset for. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gca_dataset.Dataset``: A workspace for + solving a single, particular machine learning (ML) + problem. A workspace contains examples that may be + annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, dataset]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_dataset.Dataset, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_dataset( + self, + request: service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + Args: + request (:class:`~.service.GetDatasetRequest`): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
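+
+        # (Usage sketch; the dataset path below is hypothetical. The flattened
+        # `name=` form and the `request=` form are mutually exclusive, per the
+        # sanity check above.)
+        #
+        #     dataset = await client.get_dataset(
+        #         name="projects/my-project/locations/us-central1/datasets/TBL123"
+        #     )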
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_datasets( + self, + request: service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists datasets in a project. + + Args: + request (:class:`~.service.ListDatasetsRequest`): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + parent (:class:`str`): + Required. The resource name of the + project from which to list datasets. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatasetsAsyncPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
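+
+        # (Illustrative iteration sketch; the parent path is hypothetical.)
+        #
+        #     pager = await client.list_datasets(
+        #         parent="projects/my-project/locations/us-central1"
+        #     )
+        #     async for dataset in pager:
+        #         print(dataset.display_name)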
+ response = pagers.ListDatasetsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset( + self, + request: service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. + + Args: + request (:class:`~.service.UpdateDatasetRequest`): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset which replaces + the resource on the server. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. The update mask applies to + the resource. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([dataset, update_mask]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("dataset.name", request.dataset.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_dataset( + self, + request: service.DeleteDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteDatasetRequest`): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. 
+ name (:class:`str`): + Required. The resource name of the + dataset to delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_dataset, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def import_data( + self, + request: service.ImportDataRequest = None, + *, + name: str = None, + input_config: io.InputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Args: + request (:class:`~.service.ImportDataRequest`): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + name (:class:`str`): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. 
+ This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`~.io.InputConfig`): + Required. The desired input location + and its domain specific semantics, if + any. + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, input_config]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def export_data( + self, + request: service.ExportDataRequest = None, + *, + name: str = None, + output_config: io.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Args: + request (:class:`~.service.ExportDataRequest`): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + name (:class:`str`): + Required. The resource name of the + dataset. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`~.io.OutputConfig`): + Required. 
The desired output + location. + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, output_config]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_annotation_spec( + self, + request: service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + Args: + request (:class:`~.service.GetAnnotationSpecRequest`): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + name (:class:`str`): + Required. The resource name of the + annotation spec to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.annotation_spec.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_model( + self, + request: service.CreateModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Args: + request (:class:`~.service.CreateModelRequest`): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + parent (:class:`str`): + Required. Resource name of the parent + project where the model is being + created. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`~.gca_model.Model`): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gca_model.Model``: API proto representing a + trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, model]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.CreateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
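+
+        # (Rough training sketch; field values are placeholders, and the
+        # metadata oneof on the model depends on the problem domain.)
+        #
+        #     op = await client.create_model(
+        #         parent="projects/my-project/locations/us-central1",
+        #         model={
+        #             "display_name": "my_model",
+        #             "dataset_id": "TCN123",
+        #             "text_classification_model_metadata": {},
+        #         },
+        #     )
+        #     model = await op.result()  # waits for training to finish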
+ + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model( + self, + request: service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + Args: + request (:class:`~.service.GetModelRequest`): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + name (:class:`str`): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_models( + self, + request: service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists models. 
+ + Args: + request (:class:`~.service.ListModelsRequest`): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + parent (:class:`str`): + Required. Resource name of the + project, from which to list the models. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelsAsyncPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model( + self, + request: service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteModelRequest`): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. + name (:class:`str`): + Required. Resource name of the model + being deleted. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def update_model( + self, + request: service.UpdateModelRequest = None, + *, + model: gca_model.Model = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a model. + + Args: + request (:class:`~.service.UpdateModelRequest`): + The request object. Request message for + [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] + model (:class:`~.gca_model.Model`): + Required. The model which replaces + the resource on the server. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. The update mask applies to + the resource. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_model.Model: + API proto representing a trained + machine learning model. 
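+
+        Example:
+            A hypothetical sketch of renaming a model via a field mask
+            (``display_name`` is assumed here to be a mutable field)::
+
+                from google.protobuf import field_mask_pb2
+
+                model.display_name = "renamed_model"
+                updated = await client.update_model(
+                    model=model,
+                    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+                )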
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([model, update_mask]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = service.UpdateModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if model is not None:
+            request.model = model
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_model,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("model.name", request.model.name),)
+            ),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def deploy_model(
+        self,
+        request: service.DeployModelRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (for example, changing
+        [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.DeployModelRequest`):
+                The request object. Request message for
+                [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
+            name (:class:`str`):
+                Required. Resource name of the model
+                to deploy.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:``~.empty.Empty``: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
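+
+        # (Deployment sketch; the model path is a placeholder. As the
+        # docstring notes, only some domains require explicit deployment.)
+        #
+        #     op = await client.deploy_model(
+        #         name="projects/my-project/locations/us-central1/models/IOD456"
+        #     )
+        #     await op.result()  # resolves to Empty once deployed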
+ if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_model( + self, + request: service.UndeployModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Args: + request (:class:`~.service.UndeployModelRequest`): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. + name (:class:`str`): + Required. Resource name of the model + to undeploy. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
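The `to_grpc_metadata` call used throughout these wrappers is what turns the resource name into the `x-goog-request-params` routing header. A rough illustration of the metadata it produces (the exact tuple shape and URL-encoding are assumptions about the installed `google-api-core` version):

```py
from google.api_core.gapic_v1 import routing_header

# Expected to yield something like
# ("x-goog-request-params", "name=projects%2Fp%2Flocations%2Fus-central1%2Fmodels%2Fm")
header = routing_header.to_grpc_metadata(
    (("name", "projects/p/locations/us-central1/models/m"),)
)
print(header)
```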
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.undeploy_model,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def export_model(
+        self,
+        request: service.ExportModelRequest = None,
+        *,
+        name: str = None,
+        output_config: io.ModelExportOutputConfig = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Exports a trained, "export-able" model to a user-specified
+        Google Cloud Storage location. A model is considered export-able
+        if and only if it has an export format defined for it in
+        [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.ExportModelRequest`):
+                The request object. Request message for
+                [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel].
+                Models need to be enabled for exporting; otherwise, an
+                error code will be returned.
+            name (:class:`str`):
+                Required. The resource name of the
+                model to export.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            output_config (:class:`~.io.ModelExportOutputConfig`):
+                Required. The desired output location
+                and configuration.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([name, output_config]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+ ) + + request = service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation( + self, + request: service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + Args: + request (:class:`~.service.GetModelEvaluationRequest`): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. + name (:class:`str`): + Required. Resource name for the model + evaluation. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_evaluation.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
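Since `export_model` also returns a long-running operation, a sketch of driving it from the synchronous client may help (the bucket URI and IDs are placeholders, and the type names assume the package-level re-exports):

```py
# Sketch: exporting a model to Cloud Storage; placeholders throughout.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
name = client.model_path("my-project", "us-central1", "my-model-id")
output_config = automl_v1.ModelExportOutputConfig(
    gcs_destination=automl_v1.GcsDestination(
        output_uri_prefix="gs://my-bucket/model-export/"
    )
)
operation = client.export_model(name=name, output_config=output_config)
operation.result()  # Blocks until the export completes.
```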
+ return response + + async def list_model_evaluations( + self, + request: service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists model evaluations. + + Args: + request (:class:`~.service.ListModelEvaluationsRequest`): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + parent (:class:`str`): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Required. An expression for filtering the results of the + request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation + was done for annotation spec with ID different than + 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation + was done for aggregate of all annotation specs. + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelEvaluationsAsyncPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, filter]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
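The pager constructed below supports `async for`, fetching further pages on demand; a sketch (the parent name is a placeholder, and `"-"` as the model ID spans all models of the location):

```py
# Sketch: iterating evaluations across all models of a location.
from google.cloud import automl_v1


async def show_evaluations(client: automl_v1.AutoMlAsyncClient):
    parent = "projects/my-project/locations/us-central1/models/-"
    pager = await client.list_model_evaluations(parent=parent, filter="")
    async for evaluation in pager:  # Additional pages are fetched lazily.
        print(evaluation.name)
```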
+ response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoMlAsyncClient",) diff --git a/google/cloud/automl_v1/services/auto_ml/client.py b/google/cloud/automl_v1/services/auto_ml/client.py new file mode 100644 index 00000000..e615ce00 --- /dev/null +++ b/google/cloud/automl_v1/services/auto_ml/client.py @@ -0,0 +1,1972 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1.services.auto_ml import pagers +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import classification +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import service +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import text_extraction +from google.cloud.automl_v1.types import text_sentiment +from google.cloud.automl_v1.types import translation +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AutoMlGrpcTransport +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport + + +class AutoMlClientMeta(type): + """Metaclass for the AutoMl client. + + This provides class-level methods for building and retrieving + support objects (e.g. 
transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[AutoMlTransport]]
+    _transport_registry["grpc"] = AutoMlGrpcTransport
+    _transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[AutoMlTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class AutoMlClient(metaclass=AutoMlClientMeta):
+    """AutoML Server API.
+
+    The resource names are assigned by the server. The server never
+    reuses names that it has created after the resources with those
+    names are deleted.
+
+    An ID of a resource is the last element of the item's resource name.
+    For
+    ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
+    the ID for the item is ``{dataset_id}``.
+
+    Currently the only supported ``location_id`` is "us-central1".
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert an API endpoint to its mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "automl.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            AutoMlClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @staticmethod
+    def dataset_path(project: str, location: str, dataset: str,) -> str:
+        """Return a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+            project=project, location=location, dataset=dataset,
+        )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str, str]:
+        """Parse a dataset path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str, location: str, model: str,) -> str:
+        """Return a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(
+            project=project, location=location, model=model,
+        )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str, str]:
+        """Parse a model path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: credentials.Credentials = None,
+        transport: Union[str, AutoMlTransport] = None,
+        client_options: ClientOptions = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the AutoMl client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.AutoMlTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The GOOGLE_API_USE_MTLS
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint; this is the default value for
+                the environment variable) and "auto" (auto-switch to the default
+                mTLS endpoint if client SSL credentials are present). However,
+                the ``api_endpoint`` property takes precedence if provided.
+                (2) The ``client_cert_source`` property is used to provide client
+                SSL credentials for mutual TLS transport. If not provided, the
+                default SSL credentials will be used if present.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+ """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoMlTransport): + # transport is a AutoMlTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_dataset( + self, + request: service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a dataset. + + Args: + request (:class:`~.service.CreateDatasetRequest`): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the + project to create the dataset for. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gca_dataset.Dataset``: A workspace for + solving a single, particular machine learning (ML) + problem. A workspace contains examples that may be + annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
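The endpoint-selection logic in `__init__` above can be exercised explicitly through `client_options`; a sketch (the endpoint shown is just the default, as an illustration of the dict form that `ClientOptions.from_dict` coerces):

```py
# Sketch: passing client_options as a plain dict.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient(
    client_options={"api_endpoint": "automl.googleapis.com"}
)
```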
+ has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateDatasetRequest): + request = service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gca_dataset.Dataset, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_dataset( + self, + request: service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + Args: + request (:class:`~.service.GetDatasetRequest`): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetDatasetRequest): + request = service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
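A sketch of calling the `create_dataset` wrapper above with flattened fields (the project ID and language codes are placeholders, and `TranslationDatasetMetadata` assumes the package-level re-exports):

```py
# Sketch: create_dataset returns an LRO that resolves to the Dataset.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
parent = "projects/my-project/locations/us-central1"
dataset = automl_v1.Dataset(
    display_name="my_translation_dataset",
    translation_dataset_metadata=automl_v1.TranslationDatasetMetadata(
        source_language_code="en", target_language_code="es",
    ),
)
operation = client.create_dataset(parent=parent, dataset=dataset)
created = operation.result()
print(created.name)
```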
+ rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_datasets( + self, + request: service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists datasets in a project. + + Args: + request (:class:`~.service.ListDatasetsRequest`): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + parent (:class:`str`): + Required. The resource name of the + project from which to list datasets. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatasetsPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListDatasetsRequest): + request = service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_dataset( + self, + request: service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. 
+ + Args: + request (:class:`~.service.UpdateDatasetRequest`): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset which replaces + the resource on the server. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. The update mask applies to + the resource. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateDatasetRequest): + request = service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("dataset.name", request.dataset.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_dataset( + self, + request: service.DeleteDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteDatasetRequest`): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeleteDatasetRequest): + request = service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def import_data( + self, + request: service.ImportDataRequest = None, + *, + name: str = None, + input_config: io.InputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Args: + request (:class:`~.service.ImportDataRequest`): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + name (:class:`str`): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`~.io.InputConfig`): + Required. The desired input location + and its domain specific semantics, if + any. 
+ This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ImportDataRequest): + request = service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_data( + self, + request: service.ExportDataRequest = None, + *, + name: str = None, + output_config: io.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Args: + request (:class:`~.service.ExportDataRequest`): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + name (:class:`str`): + Required. The resource name of the + dataset. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`~.io.OutputConfig`): + Required. The desired output + location. 
+ This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportDataRequest): + request = service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_annotation_spec( + self, + request: service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + Args: + request (:class:`~.service.GetAnnotationSpecRequest`): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + name (:class:`str`): + Required. The resource name of the + annotation spec to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.annotation_spec.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetAnnotationSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetAnnotationSpecRequest): + request = service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_model( + self, + request: service.CreateModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Args: + request (:class:`~.service.CreateModelRequest`): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + parent (:class:`str`): + Required. Resource name of the parent + project where the model is being + created. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`~.gca_model.Model`): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gca_model.Model``: API proto representing a + trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateModelRequest): + request = service.CreateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_model( + self, + request: service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + Args: + request (:class:`~.service.GetModelRequest`): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + name (:class:`str`): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetModelRequest): + request = service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
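In contrast to the operation-returning methods, `get_model` is a plain unary call; a sketch (IDs are placeholders):

```py
# Sketch: fetching a model by its resource name.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
name = client.model_path("my-project", "us-central1", "my-model-id")
model = client.get_model(name=name)
print(model.display_name, model.deployment_state)
```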
+ return response + + def list_models( + self, + request: service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists models. + + Args: + request (:class:`~.service.ListModelsRequest`): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + parent (:class:`str`): + Required. Resource name of the + project, from which to list the models. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelsPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelsRequest): + request = service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model( + self, + request: service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteModelRequest`): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. + name (:class:`str`): + Required. Resource name of the model + being deleted. 
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.DeleteModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.DeleteModelRequest):
+            request = service.DeleteModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def update_model(
+        self,
+        request: service.UpdateModelRequest = None,
+        *,
+        model: gca_model.Model = None,
+        update_mask: field_mask.FieldMask = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gca_model.Model:
+        r"""Updates a model.
+
+        Args:
+            request (:class:`~.service.UpdateModelRequest`):
+                The request object. Request message for
+                [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel].
+            model (:class:`~.gca_model.Model`):
+                Required. The model which replaces
+                the resource on the server.
+                This corresponds to the ``model`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (:class:`~.field_mask.FieldMask`):
+                Required. The update mask applies to
+                the resource.
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.gca_model.Model:
+                API proto representing a trained
+                machine learning model.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([model, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.UpdateModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.UpdateModelRequest):
+            request = service.UpdateModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if model is not None:
+            request.model = model
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("model.name", request.model.name),)
+            ),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def deploy_model(
+        self,
+        request: service.DeployModelRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (e.g. changing
+        [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.DeployModelRequest`):
+                The request object. Request message for
+                [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
+            name (:class:`str`):
+                Required. Resource name of the model
+                to deploy.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.DeployModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.DeployModelRequest):
+            request = service.DeployModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.deploy_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def undeploy_model(
+        self,
+        request: service.UndeployModelRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Undeploys a model. If the model is not deployed this method has
+        no effect.
+
+        Only applicable for Text Classification, Image Object Detection
+        and Tables; all other domains manage deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.UndeployModelRequest`):
+                The request object. Request message for
+                [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
+            name (:class:`str`):
+                Required. Resource name of the model
+                to undeploy.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.UndeployModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.UndeployModelRequest):
+            request = service.UndeployModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.undeploy_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def export_model(
+        self,
+        request: service.ExportModelRequest = None,
+        *,
+        name: str = None,
+        output_config: io.ModelExportOutputConfig = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Exports a trained, "export-able" model to a user-specified
+        Google Cloud Storage location. A model is considered export-able
+        if and only if it has an export format defined for it in
+        [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.ExportModelRequest`):
+                The request object. Request message for
+                [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel].
+                Models need to be enabled for exporting, otherwise an
+                error code will be returned.
+            name (:class:`str`):
+                Required. The resource name of the
+                model to export.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            output_config (:class:`~.io.ModelExportOutputConfig`):
+                Required. The desired output location
+                and configuration.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, output_config])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.ExportModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.ExportModelRequest):
+            request = service.ExportModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+        if output_config is not None:
+            request.output_config = output_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.export_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_model_evaluation(
+        self,
+        request: service.GetModelEvaluationRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> model_evaluation.ModelEvaluation:
+        r"""Gets a model evaluation.
+
+        Args:
+            request (:class:`~.service.GetModelEvaluationRequest`):
+                The request object. Request message for
+                [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
+            name (:class:`str`):
+                Required. Resource name for the model
+                evaluation.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.model_evaluation.ModelEvaluation:
+                Evaluation results of a model.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.GetModelEvaluationRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.GetModelEvaluationRequest):
+            request = service.GetModelEvaluationRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def list_model_evaluations(
+        self,
+        request: service.ListModelEvaluationsRequest = None,
+        *,
+        parent: str = None,
+        filter: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListModelEvaluationsPager:
+        r"""Lists model evaluations.
+
+        Args:
+            request (:class:`~.service.ListModelEvaluationsRequest`):
+                The request object. Request message for
+                [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
+            parent (:class:`str`):
+                Required. Resource name of the model
+                to list the model evaluations for. If
+                modelId is set as "-", this will list
+                model evaluations from across all models
+                of the parent location.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            filter (:class:`str`):
+                Required. An expression for filtering the results of the
+                request.
+
+                -  ``annotation_spec_id`` - for =, != or existence. See
+                   the examples below for the latter.
+
+                Some examples of using the filter are:
+
+                -  ``annotation_spec_id!=4`` --> The model evaluation
+                   was done for the annotation spec with an ID different
+                   from 4.
+                -  ``NOT annotation_spec_id:*`` --> The model evaluation
+                   was done for the aggregate of all annotation specs.
+                This corresponds to the ``filter`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListModelEvaluationsPager:
+                Response message for
+                [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, filter])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.ListModelEvaluationsRequest.
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelEvaluationsRequest): + request = service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoMlClient",) diff --git a/google/cloud/automl_v1/services/auto_ml/pagers.py b/google/cloud/automl_v1/services/auto_ml/pagers.py new file mode 100644 index 00000000..4de690c7 --- /dev/null +++ b/google/cloud/automl_v1/services/auto_ml/pagers.py @@ -0,0 +1,407 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import service + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`~.service.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`~.service.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListDatasetsResponse], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListDatasetsRequest`): + The initial request object. + response (:class:`~.service.ListDatasetsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`~.service.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`~.service.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListDatasetsResponse]], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListDatasetsRequest`): + The initial request object. + response (:class:`~.service.ListDatasetsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`~.service.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListModelsResponse], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelsRequest`): + The initial request object. + response (:class:`~.service.ListModelsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model.Model]: + for page in self.pages: + yield from page.model + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`~.service.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListModelsResponse]], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelsRequest`): + The initial request object. + response (:class:`~.service.ListModelsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
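+
+        Note: instances of this pager are normally constructed for you by
+        ``AutoMlAsyncClient.list_models``; a typical (hypothetical) usage is
+        ``async for model in await client.list_models(parent=parent)``.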
+ """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.model: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. + + All the usual :class:`~.service.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListModelEvaluationsResponse], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelEvaluationsRequest`): + The initial request object. + response (:class:`~.service.ListModelEvaluationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluation + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. 
+ + All the usual :class:`~.service.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelEvaluationsRequest`): + The initial request object. + response (:class:`~.service.ListModelEvaluationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/automl_v1/services/auto_ml/transports/__init__.py b/google/cloud/automl_v1/services/auto_ml/transports/__init__.py new file mode 100644 index 00000000..9e5456eb --- /dev/null +++ b/google/cloud/automl_v1/services/auto_ml/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoMlTransport +from .grpc import AutoMlGrpcTransport +from .grpc_asyncio import AutoMlGrpcAsyncIOTransport + + +# Compile a registry of transports. 
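+# Each key is a transport name that callers may select via the client's
+# ``transport`` argument (e.g. ``AutoMlClient(transport="grpc")``); the
+# value is the class implementing that transport.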
+_transport_registry = OrderedDict()  # type: Dict[str, Type[AutoMlTransport]]
+_transport_registry["grpc"] = AutoMlGrpcTransport
+_transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport
+
+
+__all__ = (
+    "AutoMlTransport",
+    "AutoMlGrpcTransport",
+    "AutoMlGrpcAsyncIOTransport",
+)
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/base.py b/google/cloud/automl_v1/services/auto_ml/transports/base.py
new file mode 100644
index 00000000..5e230105
--- /dev/null
+++ b/google/cloud/automl_v1/services/auto_ml/transports/base.py
@@ -0,0 +1,442 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.automl_v1.types import annotation_spec
+from google.cloud.automl_v1.types import dataset
+from google.cloud.automl_v1.types import dataset as gca_dataset
+from google.cloud.automl_v1.types import model
+from google.cloud.automl_v1.types import model as gca_model
+from google.cloud.automl_v1.types import model_evaluation
+from google.cloud.automl_v1.types import service
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class AutoMlTransport(abc.ABC):
+    """Abstract transport class for AutoMl."""
+
+    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_dataset: gapic_v1.method.wrap_method( + self.create_dataset, default_timeout=5.0, client_info=client_info, + ), + self.get_dataset: gapic_v1.method.wrap_method( + self.get_dataset, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_datasets: gapic_v1.method.wrap_method( + self.list_datasets, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_dataset: gapic_v1.method.wrap_method( + self.update_dataset, default_timeout=5.0, client_info=client_info, + ), + self.delete_dataset: gapic_v1.method.wrap_method( + self.delete_dataset, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.import_data: gapic_v1.method.wrap_method( + self.import_data, default_timeout=5.0, client_info=client_info, + ), + self.export_data: gapic_v1.method.wrap_method( + self.export_data, default_timeout=5.0, client_info=client_info, + ), + self.get_annotation_spec: gapic_v1.method.wrap_method( + self.get_annotation_spec, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.create_model: gapic_v1.method.wrap_method( + self.create_model, default_timeout=5.0, client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_retry=retries.Retry( + initial=0.1, + 
maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_model: gapic_v1.method.wrap_method( + self.update_model, default_timeout=5.0, client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, default_timeout=5.0, client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, default_timeout=5.0, client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, default_timeout=5.0, client_info=client_info, + ), + self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset( + self, + ) -> typing.Callable[ + [service.CreateDatasetRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_dataset( + self, + ) -> typing.Callable[ + [service.GetDatasetRequest], + typing.Union[dataset.Dataset, typing.Awaitable[dataset.Dataset]], + ]: + raise NotImplementedError() + + @property + def list_datasets( + self, + ) -> typing.Callable[ + [service.ListDatasetsRequest], + typing.Union[ + service.ListDatasetsResponse, typing.Awaitable[service.ListDatasetsResponse] + ], + ]: + raise NotImplementedError() + + @property + def update_dataset( + self, + ) -> typing.Callable[ + [service.UpdateDatasetRequest], + typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]], + ]: + raise NotImplementedError() + + @property + def delete_dataset( + self, + ) -> typing.Callable[ + [service.DeleteDatasetRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def import_data( + self, + ) -> typing.Callable[ + [service.ImportDataRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def export_data( + self, + ) -> typing.Callable[ + [service.ExportDataRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_annotation_spec( + self, + ) -> typing.Callable[ + [service.GetAnnotationSpecRequest], + typing.Union[ + annotation_spec.AnnotationSpec, + 
typing.Awaitable[annotation_spec.AnnotationSpec], + ], + ]: + raise NotImplementedError() + + @property + def create_model( + self, + ) -> typing.Callable[ + [service.CreateModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_model( + self, + ) -> typing.Callable[ + [service.GetModelRequest], + typing.Union[model.Model, typing.Awaitable[model.Model]], + ]: + raise NotImplementedError() + + @property + def list_models( + self, + ) -> typing.Callable[ + [service.ListModelsRequest], + typing.Union[ + service.ListModelsResponse, typing.Awaitable[service.ListModelsResponse] + ], + ]: + raise NotImplementedError() + + @property + def delete_model( + self, + ) -> typing.Callable[ + [service.DeleteModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def update_model( + self, + ) -> typing.Callable[ + [service.UpdateModelRequest], + typing.Union[gca_model.Model, typing.Awaitable[gca_model.Model]], + ]: + raise NotImplementedError() + + @property + def deploy_model( + self, + ) -> typing.Callable[ + [service.DeployModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def undeploy_model( + self, + ) -> typing.Callable[ + [service.UndeployModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def export_model( + self, + ) -> typing.Callable[ + [service.ExportModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_model_evaluation( + self, + ) -> typing.Callable[ + [service.GetModelEvaluationRequest], + typing.Union[ + model_evaluation.ModelEvaluation, + typing.Awaitable[model_evaluation.ModelEvaluation], + ], + ]: + raise NotImplementedError() + + @property + def list_model_evaluations( + self, + ) -> typing.Callable[ + [service.ListModelEvaluationsRequest], + typing.Union[ + service.ListModelEvaluationsResponse, + typing.Awaitable[service.ListModelEvaluationsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("AutoMlTransport",) diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py new file mode 100644 index 00000000..100f50f6 --- /dev/null +++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py @@ -0,0 +1,771 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+
+import grpc  # type: ignore
+
+from google.cloud.automl_v1.types import annotation_spec
+from google.cloud.automl_v1.types import dataset
+from google.cloud.automl_v1.types import dataset as gca_dataset
+from google.cloud.automl_v1.types import model
+from google.cloud.automl_v1.types import model as gca_model
+from google.cloud.automl_v1.types import model_evaluation
+from google.cloud.automl_v1.types import service
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
+
+
+class AutoMlGrpcTransport(AutoMlTransport):
+    """gRPC backend transport for AutoMl.
+
+    AutoML Server API.
+
+    The resource names are assigned by the server. The server never
+    reuses names that it has created after the resources with those
+    names are deleted.
+
+    An ID of a resource is the last element of the item's resource name.
+    For
+    ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
+    the ID of the item is ``{dataset_id}``.
+
+    Currently the only supported ``location_id`` is "us-central1".
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
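+
+        Example (a minimal sketch; assumes application default credentials
+        are available in the environment):
+
+            channel = AutoMlGrpcTransport.create_channel(
+                "automl.googleapis.com",
+                scopes=AutoMlGrpcTransport.AUTH_SCOPES,
+            )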
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_dataset( + self, + ) -> Callable[[service.CreateDatasetRequest], operations.Operation]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_dataset" not in self._stubs: + self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/CreateDataset", + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_dataset"] + + @property + def get_dataset(self) -> Callable[[service.GetDatasetRequest], dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_dataset" not in self._stubs: + self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetDataset", + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs["get_dataset"] + + @property + def list_datasets( + self, + ) -> Callable[[service.ListDatasetsRequest], service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_datasets" not in self._stubs: + self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ListDatasets", + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs["list_datasets"] + + @property + def update_dataset( + self, + ) -> Callable[[service.UpdateDatasetRequest], gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_dataset" not in self._stubs: + self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/UpdateDataset", + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs["update_dataset"] + + @property + def delete_dataset( + self, + ) -> Callable[[service.DeleteDatasetRequest], operations.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_dataset" not in self._stubs: + self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/DeleteDataset", + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_dataset"] + + @property + def import_data( + self, + ) -> Callable[[service.ImportDataRequest], operations.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "import_data" not in self._stubs: + self._stubs["import_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ImportData", + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["import_data"] + + @property + def export_data( + self, + ) -> Callable[[service.ExportDataRequest], operations.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_data" not in self._stubs: + self._stubs["export_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ExportData", + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_data"] + + @property + def get_annotation_spec( + self, + ) -> Callable[[service.GetAnnotationSpecRequest], annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_annotation_spec" not in self._stubs: + self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec", + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs["get_annotation_spec"] + + @property + def create_model( + self, + ) -> Callable[[service.CreateModelRequest], operations.Operation]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model" not in self._stubs: + self._stubs["create_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/CreateModel", + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_model"] + + @property + def get_model(self) -> Callable[[service.GetModelRequest], model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. 
+ + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetModel", + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[[service.ListModelsRequest], service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists models. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ListModels", + request_serializer=service.ListModelsRequest.serialize, + response_deserializer=service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + @property + def delete_model( + self, + ) -> Callable[[service.DeleteModelRequest], operations.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model" not in self._stubs: + self._stubs["delete_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/DeleteModel", + request_serializer=service.DeleteModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_model"] + + @property + def update_model(self) -> Callable[[service.UpdateModelRequest], gca_model.Model]: + r"""Return a callable for the update model method over gRPC. + + Updates a model. + + Returns: + Callable[[~.UpdateModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_model" not in self._stubs: + self._stubs["update_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/UpdateModel", + request_serializer=service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs["update_model"] + + @property + def deploy_model( + self, + ) -> Callable[[service.DeployModelRequest], operations.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a model. 
If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (e.g. changing
+        [node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.DeployModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "deploy_model" not in self._stubs:
+            self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1.AutoMl/DeployModel",
+                request_serializer=service.DeployModelRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["deploy_model"]
+
+    @property
+    def undeploy_model(
+        self,
+    ) -> Callable[[service.UndeployModelRequest], operations.Operation]:
+        r"""Return a callable for the undeploy model method over gRPC.
+
+        Undeploys a model. If the model is not deployed this method has
+        no effect.
+
+        Only applicable for Text Classification, Image Object Detection
+        and Tables; all other domains manage deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.UndeployModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "undeploy_model" not in self._stubs:
+            self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1.AutoMl/UndeployModel",
+                request_serializer=service.UndeployModelRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["undeploy_model"]
+
+    @property
+    def export_model(
+        self,
+    ) -> Callable[[service.ExportModelRequest], operations.Operation]:
+        r"""Return a callable for the export model method over gRPC.
+
+        Exports a trained, "export-able" model to a user-specified
+        Google Cloud Storage location. A model is considered export-able
+        if and only if it has an export format defined for it in
+        [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.ExportModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "export_model" not in self._stubs: + self._stubs["export_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ExportModel", + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_model"] + + @property + def get_model_evaluation( + self, + ) -> Callable[ + [service.GetModelEvaluationRequest], model_evaluation.ModelEvaluation + ]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model_evaluation" not in self._stubs: + self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetModelEvaluation", + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["get_model_evaluation"] + + @property + def list_model_evaluations( + self, + ) -> Callable[ + [service.ListModelEvaluationsRequest], service.ListModelEvaluationsResponse + ]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_evaluations" not in self._stubs: + self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ListModelEvaluations", + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs["list_model_evaluations"] + + +__all__ = ("AutoMlGrpcTransport",) diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py new file mode 100644 index 00000000..39e15a79 --- /dev/null +++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py @@ -0,0 +1,773 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.automl_v1.types import annotation_spec
+from google.cloud.automl_v1.types import dataset
+from google.cloud.automl_v1.types import dataset as gca_dataset
+from google.cloud.automl_v1.types import model
+from google.cloud.automl_v1.types import model as gca_model
+from google.cloud.automl_v1.types import model_evaluation
+from google.cloud.automl_v1.types import service
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
+from .grpc import AutoMlGrpcTransport
+
+
+class AutoMlGrpcAsyncIOTransport(AutoMlTransport):
+    """gRPC AsyncIO backend transport for AutoMl.
+
+    AutoML Server API.
+
+    The resource names are assigned by the server. The server never
+    reuses names that it has created after the resources with those
+    names are deleted.
+
+    An ID of a resource is the last element of the item's resource name.
+    For example, in
+    ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
+    the ID of the item is ``{dataset_id}``.
+
+    Currently the only supported ``location_id`` is "us-central1".
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
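+
+        Example:
+            A minimal sketch (the host shown is simply the default)::
+
+                channel = AutoMlGrpcAsyncIOTransport.create_channel(
+                    "automl.googleapis.com",
+                )
+                # ``channel`` is a ``grpc.experimental.aio.Channel``.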
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_dataset( + self, + ) -> Callable[[service.CreateDatasetRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_dataset" not in self._stubs: + self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/CreateDataset", + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_dataset"] + + @property + def get_dataset( + self, + ) -> Callable[[service.GetDatasetRequest], Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_dataset" not in self._stubs: + self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetDataset", + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs["get_dataset"] + + @property + def list_datasets( + self, + ) -> Callable[ + [service.ListDatasetsRequest], Awaitable[service.ListDatasetsResponse] + ]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. 
+ + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_datasets" not in self._stubs: + self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ListDatasets", + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs["list_datasets"] + + @property + def update_dataset( + self, + ) -> Callable[[service.UpdateDatasetRequest], Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_dataset" not in self._stubs: + self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/UpdateDataset", + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs["update_dataset"] + + @property + def delete_dataset( + self, + ) -> Callable[[service.DeleteDatasetRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_dataset" not in self._stubs: + self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/DeleteDataset", + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_dataset"] + + @property + def import_data( + self, + ) -> Callable[[service.ImportDataRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
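+        # Note: with the AsyncIO channel the returned multicallable yields
+        # awaitable calls; the async client awaits them after wrapping the
+        # stub with gapic_v1.method_async.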
+ if "import_data" not in self._stubs: + self._stubs["import_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ImportData", + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["import_data"] + + @property + def export_data( + self, + ) -> Callable[[service.ExportDataRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_data" not in self._stubs: + self._stubs["export_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ExportData", + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_data"] + + @property + def get_annotation_spec( + self, + ) -> Callable[ + [service.GetAnnotationSpecRequest], Awaitable[annotation_spec.AnnotationSpec] + ]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + Awaitable[~.AnnotationSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_annotation_spec" not in self._stubs: + self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec", + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs["get_annotation_spec"] + + @property + def create_model( + self, + ) -> Callable[[service.CreateModelRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model" not in self._stubs: + self._stubs["create_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/CreateModel", + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_model"] + + @property + def get_model(self) -> Callable[[service.GetModelRequest], Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. 
+ + Gets a model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetModel", + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[[service.ListModelsRequest], Awaitable[service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists models. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ListModels", + request_serializer=service.ListModelsRequest.serialize, + response_deserializer=service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + @property + def delete_model( + self, + ) -> Callable[[service.DeleteModelRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model" not in self._stubs: + self._stubs["delete_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/DeleteModel", + request_serializer=service.DeleteModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_model"] + + @property + def update_model( + self, + ) -> Callable[[service.UpdateModelRequest], Awaitable[gca_model.Model]]: + r"""Return a callable for the update model method over gRPC. + + Updates a model. + + Returns: + Callable[[~.UpdateModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_model" not in self._stubs: + self._stubs["update_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/UpdateModel", + request_serializer=service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs["update_model"] + + @property + def deploy_model( + self, + ) -> Callable[[service.DeployModelRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a model. If a model is already deployed, deploying it + with the same parameters has no effect. Deploying with different + parametrs (as e.g. changing + + [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) + will reset the deployment state without pausing the model's + availability. + + Only applicable for Text Classification, Image Object Detection + , Tables, and Image Segmentation; all other domains manage + deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.DeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "deploy_model" not in self._stubs: + self._stubs["deploy_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/DeployModel", + request_serializer=service.DeployModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["deploy_model"] + + @property + def undeploy_model( + self, + ) -> Callable[[service.UndeployModelRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "undeploy_model" not in self._stubs: + self._stubs["undeploy_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/UndeployModel", + request_serializer=service.UndeployModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["undeploy_model"] + + @property + def export_model( + self, + ) -> Callable[[service.ExportModelRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. 
+ + Returns: + Callable[[~.ExportModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_model" not in self._stubs: + self._stubs["export_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ExportModel", + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_model"] + + @property + def get_model_evaluation( + self, + ) -> Callable[ + [service.GetModelEvaluationRequest], Awaitable[model_evaluation.ModelEvaluation] + ]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model_evaluation" not in self._stubs: + self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/GetModelEvaluation", + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["get_model_evaluation"] + + @property + def list_model_evaluations( + self, + ) -> Callable[ + [service.ListModelEvaluationsRequest], + Awaitable[service.ListModelEvaluationsResponse], + ]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_evaluations" not in self._stubs: + self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.AutoMl/ListModelEvaluations", + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs["list_model_evaluations"] + + +__all__ = ("AutoMlGrpcAsyncIOTransport",) diff --git a/google/cloud/automl.py b/google/cloud/automl_v1/services/prediction_service/__init__.py similarity index 64% rename from google/cloud/automl.py rename to google/cloud/automl_v1/services/prediction_service/__init__.py index 9dc44cde..0c847693 100644 --- a/google/cloud/automl.py +++ b/google/cloud/automl_v1/services/prediction_service/__init__.py @@ -1,31 +1,24 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import - -from google.cloud.automl_v1 import AutoMlClient -from google.cloud.automl_v1 import PredictionServiceClient -from google.cloud.automl_v1 import enums -from google.cloud.automl_v1 import types - +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient __all__ = ( - "enums", - "types", "PredictionServiceClient", - "AutoMlClient", + "PredictionServiceAsyncClient", ) diff --git a/google/cloud/automl_v1/services/prediction_service/async_client.py b/google/cloud/automl_v1/services/prediction_service/async_client.py new file mode 100644 index 00000000..df141602 --- /dev/null +++ b/google/cloud/automl_v1/services/prediction_service/async_client.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import prediction_service + +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. 
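+
+    Example (an illustrative sketch; the model name and payload are
+    placeholders, and the call must run inside an event loop)::
+
+        client = PredictionServiceAsyncClient()
+        response = await client.predict(
+            name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
+            payload=payload,
+        )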
+ """ + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = PredictionServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def predict( + self, + request: prediction_service.PredictRequest = None, + *, + name: str = None, + payload: data_items.ExamplePayload = None, + params: Sequence[prediction_service.PredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. 
+
+        AutoML Natural Language Sentiment Analysis
+
+        - A TextSnippet up to 60,000 characters, UTF-8 encoded or a
+          document in .PDF, .TIF or .TIFF format with size up to 2MB.
+
+        AutoML Translation
+
+        - A TextSnippet up to 25,000 characters, UTF-8 encoded.
+
+        AutoML Tables
+
+        - A row with column values matching the columns of the model,
+          up to 5MB. Not available for FORECASTING ``prediction_type``.
+
+        Args:
+            request (:class:`~.prediction_service.PredictRequest`):
+                The request object. Request message for
+                [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
+            name (:class:`str`):
+                Required. Name of the model requested
+                to serve the prediction.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            payload (:class:`~.data_items.ExamplePayload`):
+                Required. Payload to perform a
+                prediction on. The payload must match
+                the problem type that the model was
+                trained to solve.
+                This corresponds to the ``payload`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`):
+                Additional domain-specific parameters; any string must
+                be up to 25000 characters long.
+
+                AutoML Vision Classification
+
+                ``score_threshold`` : (float) A value from 0.0 to 1.0.
+                When the model makes predictions for an image, it will
+                only produce results that have at least this confidence
+                score. The default is 0.5.
+
+                AutoML Vision Object Detection
+
+                ``score_threshold`` : (float) When Model detects objects
+                on the image, it will only produce bounding boxes which
+                have at least this confidence score. Value in 0 to 1
+                range, default is 0.5.
+
+                ``max_bounding_box_count`` : (int64) The maximum number
+                of bounding boxes returned. The default is 100. The
+                number of returned bounding boxes might be limited by
+                the server.
+
+                AutoML Tables
+
+                ``feature_importance`` : (boolean) Whether
+                [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
+                is populated in the returned list of
+                [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
+                objects. The default is false.
+
+                This corresponds to the ``params`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.prediction_service.PredictResponse:
+                Response message for
+                [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([name, payload, params]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = prediction_service.PredictRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+        if payload is not None:
+            request.payload = payload
+        if params is not None:
+            request.params = params
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
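+        # gapic_v1.method_async.wrap_method layers the default retry and
+        # timeout policies onto the raw transport stub and attaches the
+        # user-agent metadata derived from client_info.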
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def batch_predict( + self, + request: prediction_service.BatchPredictRequest = None, + *, + name: str = None, + input_config: io.BatchPredictInputConfig = None, + output_config: io.BatchPredictOutputConfig = None, + params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + Args: + request (:class:`~.prediction_service.BatchPredictRequest`): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + name (:class:`str`): + Required. Name of the model requested + to serve the batch prediction. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`~.io.BatchPredictInputConfig`): + Required. The input configuration for + batch prediction. + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`~.io.BatchPredictOutputConfig`): + Required. The Configuration + specifying where output predictions + should be written. + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`): + Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters + long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a text snippet, it + will only produce results that have at least this + confidence score. The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. 
+ + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. AutoML Video Intelligence + Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a video, it will + only produce results that have at least this confidence + score. The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to + request segment-level classification. AutoML Video + Intelligence returns labels and their confidence scores + for the entire segment of the video that user specified + in the request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to + request shot-level classification. AutoML Video + Intelligence determines the boundaries for each camera + shot in the entire segment of the video that user + specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence + scores for each detected shot, along with the start and + end time of the shot. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + ``1s_interval_classification`` : (boolean) Set to true + to request classification for a video at one-second + intervals. AutoML Video Intelligence returns labels and + their confidence scores for each second of the entire + segment of the video that user specified in the request + configuration. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + AutoML Video Intelligence Object Tracking + + ``score_threshold`` : (float) When Model detects objects + on video frames, it will only produce bounding boxes + which have at least this confidence score. Value in 0 to + 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. + + ``min_bounding_box_size`` : (float) Only bounding boxes + with shortest edge at least that long as a relative + value of video frame size are returned. Value in 0 to 1 + range. Default is 0. + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.prediction_service.BatchPredictResult``: + Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of the + operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, input_config, output_config, params]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.BatchPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("PredictionServiceAsyncClient",) diff --git a/google/cloud/automl_v1/services/prediction_service/client.py b/google/cloud/automl_v1/services/prediction_service/client.py new file mode 100644 index 00000000..2ccfd9fc --- /dev/null +++ b/google/cloud/automl_v1/services/prediction_service/client.py @@ -0,0 +1,621 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import prediction_service + +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PredictionServiceGrpcTransport +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +class PredictionServiceClientMeta(type): + """Metaclass for the PredictionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[PredictionServiceTransport]] + _transport_registry["grpc"] = PredictionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[PredictionServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PredictionServiceClient(metaclass=PredictionServiceClientMeta): + """AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "automl.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = None, + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def predict( + self, + request: prediction_service.PredictRequest = None, + *, + name: str = None, + payload: data_items.ExamplePayload = None, + params: Sequence[prediction_service.PredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, + up to 5MB. Not available for FORECASTING ``prediction_type``. + + Args: + request (:class:`~.prediction_service.PredictRequest`): + The request object. 
Request message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + name (:class:`str`): + Required. Name of the model requested + to serve the prediction. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + payload (:class:`~.data_items.ExamplePayload`): + Required. Payload to perform a + prediction on. The payload must match + the problem type that the model was + trained to solve. + This corresponds to the ``payload`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`): + Additional domain-specific parameters, any string must + be up to 25000 characters long. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned. The default is 100. The + number of returned bounding boxes might be limited by + the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + + [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] + is populated in the returned list of + [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] + objects. The default is false. + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.prediction_service.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, payload, params]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if payload is not None: + request.payload = payload + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def batch_predict( + self, + request: prediction_service.BatchPredictRequest = None, + *, + name: str = None, + input_config: io.BatchPredictInputConfig = None, + output_config: io.BatchPredictOutputConfig = None, + params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + Args: + request (:class:`~.prediction_service.BatchPredictRequest`): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + name (:class:`str`): + Required. Name of the model requested + to serve the batch prediction. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`~.io.BatchPredictInputConfig`): + Required. The input configuration for + batch prediction. + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`~.io.BatchPredictOutputConfig`): + Required. The Configuration + specifying where output predictions + should be written. + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`): + Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters + long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a text snippet, it + will only produce results that have at least this + confidence score. The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. 
+ + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. AutoML Video Intelligence + Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a video, it will + only produce results that have at least this confidence + score. The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to + request segment-level classification. AutoML Video + Intelligence returns labels and their confidence scores + for the entire segment of the video that user specified + in the request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to + request shot-level classification. AutoML Video + Intelligence determines the boundaries for each camera + shot in the entire segment of the video that user + specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence + scores for each detected shot, along with the start and + end time of the shot. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + ``1s_interval_classification`` : (boolean) Set to true + to request classification for a video at one-second + intervals. AutoML Video Intelligence returns labels and + their confidence scores for each second of the entire + segment of the video that user specified in the request + configuration. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + AutoML Video Intelligence Object Tracking + + ``score_threshold`` : (float) When Model detects objects + on video frames, it will only produce bounding boxes + which have at least this confidence score. Value in 0 to + 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. + + ``min_bounding_box_size`` : (float) Only bounding boxes + with shortest edge at least that long as a relative + value of video frame size are returned. Value in 0 to 1 + range. Default is 0. + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.prediction_service.BatchPredictResult``: + Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of the + operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, input_config, output_config, params]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.BatchPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.BatchPredictRequest): + request = prediction_service.BatchPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("PredictionServiceClient",) diff --git a/google/cloud/automl_v1/services/prediction_service/transports/__init__.py b/google/cloud/automl_v1/services/prediction_service/transports/__init__.py new file mode 100644 index 00000000..7eb32ea8 --- /dev/null +++ b/google/cloud/automl_v1/services/prediction_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry["grpc"] = PredictionServiceGrpcTransport +_transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + + +__all__ = ( + "PredictionServiceTransport", + "PredictionServiceGrpcTransport", + "PredictionServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/automl_v1/services/prediction_service/transports/base.py b/google/cloud/automl_v1/services/prediction_service/transports/base.py new file mode 100644 index 00000000..349d8793 --- /dev/null +++ b/google/cloud/automl_v1/services/prediction_service/transports/base.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, default_timeout=60.0, client_info=client_info, + ), + self.batch_predict: gapic_v1.method.wrap_method( + self.batch_predict, default_timeout=60.0, client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def predict( + self, + ) -> typing.Callable[ + [prediction_service.PredictRequest], + typing.Union[ + prediction_service.PredictResponse, + typing.Awaitable[prediction_service.PredictResponse], + ], + ]: + raise NotImplementedError() + + @property + def batch_predict( + self, + ) -> typing.Callable[ + [prediction_service.BatchPredictRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + +__all__ = ("PredictionServiceTransport",) diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py new file mode 100644 index 00000000..e4508add --- /dev/null +++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py @@ -0,0 +1,345 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO + + +class PredictionServiceGrpcTransport(PredictionServiceTransport): + """gRPC backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. 
+ return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def predict( + self, + ) -> Callable[ + [prediction_service.PredictRequest], prediction_service.PredictResponse + ]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, + up to 5MB. Not available for FORECASTING ``prediction_type``. + + Returns: + Callable[[~.PredictRequest], + ~.PredictResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "predict" not in self._stubs: + self._stubs["predict"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.PredictionService/Predict", + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs["predict"] + + @property + def batch_predict( + self, + ) -> Callable[[prediction_service.BatchPredictRequest], operations.Operation]: + r"""Return a callable for the batch predict method over gRPC. + + Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. 
+ Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + Returns: + Callable[[~.BatchPredictRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_predict" not in self._stubs: + self._stubs["batch_predict"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1.PredictionService/BatchPredict", + request_serializer=prediction_service.BatchPredictRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["batch_predict"] + + +__all__ = ("PredictionServiceGrpcTransport",) diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..f92ad264 --- /dev/null +++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,341 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def predict( + self, + ) -> Callable[ + [prediction_service.PredictRequest], + Awaitable[prediction_service.PredictResponse], + ]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. 
+
+        AutoML Natural Language Entity Extraction
+
+        -  A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a
+           document in .PDF, .TIF or .TIFF format with size up to 20MB.
+
+        AutoML Natural Language Sentiment Analysis
+
+        -  A TextSnippet up to 60,000 characters, UTF-8 encoded or a
+           document in .PDF, .TIF or .TIFF format with size up to 2MB.
+
+        AutoML Translation
+
+        -  A TextSnippet up to 25,000 characters, UTF-8 encoded.
+
+        AutoML Tables
+
+        -  A row with column values matching the columns of the model,
+           up to 5MB. Not available for FORECASTING ``prediction_type``.
+
+        Returns:
+            Callable[[~.PredictRequest],
+                    Awaitable[~.PredictResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "predict" not in self._stubs:
+            self._stubs["predict"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1.PredictionService/Predict",
+                request_serializer=prediction_service.PredictRequest.serialize,
+                response_deserializer=prediction_service.PredictResponse.deserialize,
+            )
+        return self._stubs["predict"]
+
+    @property
+    def batch_predict(
+        self,
+    ) -> Callable[
+        [prediction_service.BatchPredictRequest], Awaitable[operations.Operation]
+    ]:
+        r"""Return a callable for the batch predict method over gRPC.
+
+        Perform a batch prediction. Unlike the online
+        [Predict][google.cloud.automl.v1.PredictionService.Predict], the
+        batch prediction result won't be immediately available in the
+        response. Instead, a long running operation object is returned.
+        The user can poll the operation result via the
+        [GetOperation][google.longrunning.Operations.GetOperation]
+        method. Once the operation is done,
+        [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult]
+        is returned in the
+        [response][google.longrunning.Operation.response] field.
+        Available for the following ML scenarios:
+
+        -  AutoML Vision Classification
+        -  AutoML Vision Object Detection
+        -  AutoML Video Intelligence Classification
+        -  AutoML Video Intelligence Object Tracking
+        -  AutoML Natural Language Classification
+        -  AutoML Natural Language Entity Extraction
+        -  AutoML Natural Language Sentiment Analysis
+        -  AutoML Tables
+
+        Returns:
+            Callable[[~.BatchPredictRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "batch_predict" not in self._stubs:
+            self._stubs["batch_predict"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1.PredictionService/BatchPredict",
+                request_serializer=prediction_service.BatchPredictRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["batch_predict"]
+
+
+__all__ = ("PredictionServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/automl_v1/types.py b/google/cloud/automl_v1/types.py
deleted file mode 100644
index 4b475551..00000000
--- a/google/cloud/automl_v1/types.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
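For orientation, here is a minimal, hedged sketch of driving the async surface built on this transport. The import path for the async client and the project, location, and model IDs are assumptions for illustration; they are not taken from this diff.

```py
# Hedged sketch: online prediction through the async client that wraps
# PredictionServiceGrpcAsyncIOTransport. The project, location, and model
# IDs below are placeholders.
import asyncio

from google.cloud import automl_v1
from google.cloud.automl_v1.services.prediction_service import (
    PredictionServiceAsyncClient,
)


async def run_online_prediction() -> None:
    client = PredictionServiceAsyncClient()
    name = client.model_path("my-project", "us-central1", "my-model-id")
    payload = automl_v1.ExamplePayload(
        text_snippet=automl_v1.TextSnippet(
            content="Hello world", mime_type="text/plain"
        )
    )
    # The request object and flattened keyword arguments are mutually
    # exclusive; the request-dict form is used here.
    response = await client.predict(request={"name": name, "payload": payload})
    for annotation in response.payload:
        print(annotation.display_name)


asyncio.run(run_online_prediction())
```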
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.automl_v1.proto import annotation_payload_pb2 -from google.cloud.automl_v1.proto import annotation_spec_pb2 -from google.cloud.automl_v1.proto import classification_pb2 -from google.cloud.automl_v1.proto import data_items_pb2 -from google.cloud.automl_v1.proto import dataset_pb2 -from google.cloud.automl_v1.proto import detection_pb2 -from google.cloud.automl_v1.proto import geometry_pb2 -from google.cloud.automl_v1.proto import image_pb2 -from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import model_evaluation_pb2 -from google.cloud.automl_v1.proto import model_pb2 -from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.automl_v1.proto import prediction_service_pb2 -from google.cloud.automl_v1.proto import service_pb2 -from google.cloud.automl_v1.proto import text_extraction_pb2 -from google.cloud.automl_v1.proto import text_pb2 -from google.cloud.automl_v1.proto import text_segment_pb2 -from google.cloud.automl_v1.proto import text_sentiment_pb2 -from google.cloud.automl_v1.proto import translation_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - longrunning_operations_pb2, - any_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - annotation_payload_pb2, - annotation_spec_pb2, - classification_pb2, - data_items_pb2, - dataset_pb2, - detection_pb2, - geometry_pb2, - image_pb2, - io_pb2, - model_evaluation_pb2, - model_pb2, - proto_operations_pb2, - prediction_service_pb2, - service_pb2, - text_extraction_pb2, - text_pb2, - text_segment_pb2, - text_sentiment_pb2, - translation_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.automl_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/automl_v1/types/__init__.py b/google/cloud/automl_v1/types/__init__.py new file mode 100644 index 00000000..0460fe2c --- /dev/null +++ b/google/cloud/automl_v1/types/__init__.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .classification import ( + ClassificationAnnotation, + ClassificationEvaluationMetrics, +) +from .geometry import ( + NormalizedVertex, + BoundingPoly, +) +from .detection import ( + ImageObjectDetectionAnnotation, + BoundingBoxMetricsEntry, + ImageObjectDetectionEvaluationMetrics, +) +from .text_segment import TextSegment +from .text_extraction import ( + TextExtractionAnnotation, + TextExtractionEvaluationMetrics, +) +from .text_sentiment import ( + TextSentimentAnnotation, + TextSentimentEvaluationMetrics, +) +from .io import ( + InputConfig, + BatchPredictInputConfig, + DocumentInputConfig, + OutputConfig, + BatchPredictOutputConfig, + ModelExportOutputConfig, + GcsSource, + GcsDestination, +) +from .data_items import ( + Image, + TextSnippet, + DocumentDimensions, + Document, + ExamplePayload, +) +from .translation import ( + TranslationDatasetMetadata, + TranslationEvaluationMetrics, + TranslationModelMetadata, + TranslationAnnotation, +) +from .annotation_payload import AnnotationPayload +from .annotation_spec import AnnotationSpec +from .image import ( + ImageClassificationDatasetMetadata, + ImageObjectDetectionDatasetMetadata, + ImageClassificationModelMetadata, + ImageObjectDetectionModelMetadata, + ImageClassificationModelDeploymentMetadata, + ImageObjectDetectionModelDeploymentMetadata, +) +from .text import ( + TextClassificationDatasetMetadata, + TextClassificationModelMetadata, + TextExtractionDatasetMetadata, + TextExtractionModelMetadata, + TextSentimentDatasetMetadata, + TextSentimentModelMetadata, +) +from .dataset import Dataset +from .model import Model +from .model_evaluation import ModelEvaluation +from .operations import ( + OperationMetadata, + DeleteOperationMetadata, + DeployModelOperationMetadata, + UndeployModelOperationMetadata, + CreateDatasetOperationMetadata, + CreateModelOperationMetadata, + ImportDataOperationMetadata, + ExportDataOperationMetadata, + BatchPredictOperationMetadata, + ExportModelOperationMetadata, +) +from .prediction_service import ( + PredictRequest, + PredictResponse, + BatchPredictRequest, + BatchPredictResult, +) +from .service import ( + CreateDatasetRequest, + GetDatasetRequest, + ListDatasetsRequest, + ListDatasetsResponse, + UpdateDatasetRequest, + DeleteDatasetRequest, + ImportDataRequest, + ExportDataRequest, + GetAnnotationSpecRequest, + CreateModelRequest, + GetModelRequest, + ListModelsRequest, + ListModelsResponse, + DeleteModelRequest, + UpdateModelRequest, + DeployModelRequest, + UndeployModelRequest, + ExportModelRequest, + GetModelEvaluationRequest, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, +) + + +__all__ = ( + "ClassificationAnnotation", + "ClassificationEvaluationMetrics", + "NormalizedVertex", + "BoundingPoly", + "ImageObjectDetectionAnnotation", + "BoundingBoxMetricsEntry", + "ImageObjectDetectionEvaluationMetrics", + "TextSegment", + "TextExtractionAnnotation", + "TextExtractionEvaluationMetrics", + "TextSentimentAnnotation", + "TextSentimentEvaluationMetrics", + "InputConfig", + "BatchPredictInputConfig", + "DocumentInputConfig", + "OutputConfig", + "BatchPredictOutputConfig", + "ModelExportOutputConfig", + "GcsSource", + "GcsDestination", + "Image", + "TextSnippet", + "DocumentDimensions", + "Document", + "ExamplePayload", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", + "TranslationAnnotation", + "AnnotationPayload", + 
"AnnotationSpec", + "ImageClassificationDatasetMetadata", + "ImageObjectDetectionDatasetMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionModelMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageObjectDetectionModelDeploymentMetadata", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionDatasetMetadata", + "TextExtractionModelMetadata", + "TextSentimentDatasetMetadata", + "TextSentimentModelMetadata", + "Dataset", + "Model", + "ModelEvaluation", + "OperationMetadata", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", + "UndeployModelOperationMetadata", + "CreateDatasetOperationMetadata", + "CreateModelOperationMetadata", + "ImportDataOperationMetadata", + "ExportDataOperationMetadata", + "BatchPredictOperationMetadata", + "ExportModelOperationMetadata", + "PredictRequest", + "PredictResponse", + "BatchPredictRequest", + "BatchPredictResult", + "CreateDatasetRequest", + "GetDatasetRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", + "UpdateDatasetRequest", + "DeleteDatasetRequest", + "ImportDataRequest", + "ExportDataRequest", + "GetAnnotationSpecRequest", + "CreateModelRequest", + "GetModelRequest", + "ListModelsRequest", + "ListModelsResponse", + "DeleteModelRequest", + "UpdateModelRequest", + "DeployModelRequest", + "UndeployModelRequest", + "ExportModelRequest", + "GetModelEvaluationRequest", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", +) diff --git a/google/cloud/automl_v1/types/annotation_payload.py b/google/cloud/automl_v1/types/annotation_payload.py new file mode 100644 index 00000000..911c6e42 --- /dev/null +++ b/google/cloud/automl_v1/types/annotation_payload.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import classification as gca_classification +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import text_extraction as gca_text_extraction +from google.cloud.automl_v1.types import text_sentiment as gca_text_sentiment +from google.cloud.automl_v1.types import translation as gca_translation + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", manifest={"AnnotationPayload",}, +) + + +class AnnotationPayload(proto.Message): + r"""Contains annotation information that is relevant to AutoML. + + Attributes: + translation (~.gca_translation.TranslationAnnotation): + Annotation details for translation. + classification (~.gca_classification.ClassificationAnnotation): + Annotation details for content or image + classification. + image_object_detection (~.detection.ImageObjectDetectionAnnotation): + Annotation details for image object + detection. + text_extraction (~.gca_text_extraction.TextExtractionAnnotation): + Annotation details for text extraction. + text_sentiment (~.gca_text_sentiment.TextSentimentAnnotation): + Annotation details for text sentiment. 
+        annotation_spec_id (str):
+            Output only. The resource ID of the
+            annotation spec that this annotation pertains
+            to. The annotation spec comes from either an
+            ancestor dataset, or the dataset that was used
+            to train the model in use.
+        display_name (str):
+            Output only. The value of
+            [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
+            when the model was trained. Because this field returns a
+            value at model training time, for different models trained
+            using the same dataset, the returned value could be
+            different, because the model owner could update the
+            ``display_name`` between any two model trainings.
+    """
+
+    translation = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        oneof="detail",
+        message=gca_translation.TranslationAnnotation,
+    )
+
+    classification = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof="detail",
+        message=gca_classification.ClassificationAnnotation,
+    )
+
+    image_object_detection = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        oneof="detail",
+        message=detection.ImageObjectDetectionAnnotation,
+    )
+
+    text_extraction = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        oneof="detail",
+        message=gca_text_extraction.TextExtractionAnnotation,
+    )
+
+    text_sentiment = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        oneof="detail",
+        message=gca_text_sentiment.TextSentimentAnnotation,
+    )
+
+    annotation_spec_id = proto.Field(proto.STRING, number=1)
+
+    display_name = proto.Field(proto.STRING, number=5)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/annotation_spec.py b/google/cloud/automl_v1/types/annotation_spec.py
new file mode 100644
index 00000000..9d13be8d
--- /dev/null
+++ b/google/cloud/automl_v1/types/annotation_spec.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1", manifest={"AnnotationSpec",},
+)
+
+
+class AnnotationSpec(proto.Message):
+    r"""A definition of an annotation spec.
+
+    Attributes:
+        name (str):
+            Output only. Resource name of the annotation spec. Form:
+
+            'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
+        display_name (str):
+            Required. The name of the annotation spec to show in the
+            interface. The name can be up to 32 characters long and must
+            match the regexp ``[a-zA-Z0-9_]+``.
+        example_count (int):
+            Output only. The number of examples in the
+            parent dataset labeled by the annotation spec.
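Because the detail fields of ``AnnotationPayload`` above live in a single ``detail`` oneof, at most one of them is ever set. A hedged sketch of inspecting that oneof; the values are placeholders, and the ``proto.Message.pb()`` accessor from proto-plus is assumed:

```py
# Hedged sketch: reading the "detail" oneof on an AnnotationPayload such as
# those returned in PredictResponse.payload. All values are placeholders.
from google.cloud import automl_v1

payload = automl_v1.AnnotationPayload(
    classification=automl_v1.ClassificationAnnotation(score=0.92),
    annotation_spec_id="123",
    display_name="daisy",
)

# Drop to the underlying protobuf to ask which oneof member is populated.
detail = automl_v1.AnnotationPayload.pb(payload).WhichOneof("detail")
if detail == "classification":
    print(payload.display_name, payload.classification.score)
```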
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    display_name = proto.Field(proto.STRING, number=2)
+
+    example_count = proto.Field(proto.INT32, number=9)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/classification.py b/google/cloud/automl_v1/types/classification.py
new file mode 100644
index 00000000..c925f206
--- /dev/null
+++ b/google/cloud/automl_v1/types/classification.py
@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1",
+    manifest={
+        "ClassificationType",
+        "ClassificationAnnotation",
+        "ClassificationEvaluationMetrics",
+    },
+)
+
+
+class ClassificationType(proto.Enum):
+    r"""Type of the classification problem."""
+    CLASSIFICATION_TYPE_UNSPECIFIED = 0
+    MULTICLASS = 1
+    MULTILABEL = 2
+
+
+class ClassificationAnnotation(proto.Message):
+    r"""Contains annotation details specific to classification.
+
+    Attributes:
+        score (float):
+            Output only. A confidence estimate between
+            0.0 and 1.0. A higher value means greater
+            confidence that the annotation is positive. If a
+            user approves an annotation as negative or
+            positive, the score value remains unchanged. If
+            a user creates an annotation, the score is 0 for
+            negative or 1 for positive.
+    """
+
+    score = proto.Field(proto.FLOAT, number=1)
+
+
+class ClassificationEvaluationMetrics(proto.Message):
+    r"""Model evaluation metrics for classification problems. Note: For
+    Video Classification, these metrics describe only the quality of the
+    Video Classification predictions of the "segment_classification" type.
+
+    Attributes:
+        au_prc (float):
+            Output only. The Area Under Precision-Recall
+            Curve metric. Micro-averaged for the overall
+            evaluation.
+        au_roc (float):
+            Output only. The Area Under Receiver
+            Operating Characteristic curve metric.
+            Micro-averaged for the overall evaluation.
+        log_loss (float):
+            Output only. The Log Loss metric.
+        confidence_metrics_entry (Sequence[~.classification.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]):
+            Output only. Metrics for each confidence_threshold in
+            0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
+            position_threshold = INT32_MAX_VALUE. ROC and
+            precision-recall curves, and other aggregated metrics are
+            derived from them. The confidence metrics entries may also
+            be supplied for additional values of position_threshold, but
+            from these no aggregated metrics are computed.
+        confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix):
+            Output only. Confusion matrix of the
+            evaluation. Only set for MULTICLASS
+            classification problems where the number of
+            labels is no more than 10.
+            Only set for model level evaluation, not for
+            evaluation per label.
+        annotation_spec_id (Sequence[str]):
+            Output only. The annotation spec IDs used for
+            this evaluation.
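Because the confidence metrics entries described above trace out a precision/recall curve, one common consumer-side task is picking an operating threshold. A hedged sketch, assuming ``metrics`` is the ``classification_evaluation_metrics`` of a ``ModelEvaluation`` fetched elsewhere:

```py
# Hedged sketch: choose the confidence threshold that maximizes recall
# subject to a precision floor, by scanning the curve entries of a
# ClassificationEvaluationMetrics message.
def pick_threshold(metrics, min_precision: float = 0.9):
    best = None
    for entry in metrics.confidence_metrics_entry:
        if entry.precision >= min_precision and (
            best is None or entry.recall > best.recall
        ):
            best = entry
    return None if best is None else best.confidence_threshold
```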
+    """
+
+    class ConfidenceMetricsEntry(proto.Message):
+        r"""Metrics for a single confidence threshold.
+
+        Attributes:
+            confidence_threshold (float):
+                Output only. Metrics are computed with an
+                assumption that the model never returns
+                predictions with a score lower than this value.
+            position_threshold (int):
+                Output only. Metrics are computed with an assumption that
+                the model always returns at most this many predictions
+                (ordered by their score, in descending order), but they all
+                still need to meet the confidence_threshold.
+            recall (float):
+                Output only. Recall (True Positive Rate) for
+                the given confidence threshold.
+            precision (float):
+                Output only. Precision for the given
+                confidence threshold.
+            false_positive_rate (float):
+                Output only. False Positive Rate for the
+                given confidence threshold.
+            f1_score (float):
+                Output only. The harmonic mean of recall and
+                precision.
+            recall_at1 (float):
+                Output only. The Recall (True Positive Rate)
+                when only considering the label that has the
+                highest prediction score and is not below the
+                confidence threshold for each example.
+            precision_at1 (float):
+                Output only. The precision when only
+                considering the label that has the highest
+                prediction score and is not below the confidence
+                threshold for each example.
+            false_positive_rate_at1 (float):
+                Output only. The False Positive Rate when
+                only considering the label that has the highest
+                prediction score and is not below the confidence
+                threshold for each example.
+            f1_score_at1 (float):
+                Output only. The harmonic mean of
+                [recall_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1]
+                and
+                [precision_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
+            true_positive_count (int):
+                Output only. The number of model created
+                labels that match a ground truth label.
+            false_positive_count (int):
+                Output only. The number of model created
+                labels that do not match a ground truth label.
+            false_negative_count (int):
+                Output only. The number of ground truth
+                labels that are not matched by a model created
+                label.
+            true_negative_count (int):
+                Output only. The number of labels that the
+                model did not create and that, had they been
+                created, would not have matched a ground truth
+                label.
+        """
+
+        confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+        position_threshold = proto.Field(proto.INT32, number=14)
+
+        recall = proto.Field(proto.FLOAT, number=2)
+
+        precision = proto.Field(proto.FLOAT, number=3)
+
+        false_positive_rate = proto.Field(proto.FLOAT, number=8)
+
+        f1_score = proto.Field(proto.FLOAT, number=4)
+
+        recall_at1 = proto.Field(proto.FLOAT, number=5)
+
+        precision_at1 = proto.Field(proto.FLOAT, number=6)
+
+        false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9)
+
+        f1_score_at1 = proto.Field(proto.FLOAT, number=7)
+
+        true_positive_count = proto.Field(proto.INT64, number=10)
+
+        false_positive_count = proto.Field(proto.INT64, number=11)
+
+        false_negative_count = proto.Field(proto.INT64, number=12)
+
+        true_negative_count = proto.Field(proto.INT64, number=13)
+
+    class ConfusionMatrix(proto.Message):
+        r"""Confusion matrix of the model running the classification.
+
+        Attributes:
+            annotation_spec_id (Sequence[str]):
+                Output only. IDs of the annotation specs used in the
+                confusion matrix. For Tables CLASSIFICATION
+
+                [prediction_type][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]
+                only the list of [annotation_spec_display_name-s][] is
+                populated.
+ display_name (Sequence[str]): + Output only. Display name of the annotation specs used in + the confusion matrix, as they were at the moment of the + evaluation. For Tables CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type], + distinct values of the target column at the moment of the + model evaluation are populated here. + row (Sequence[~.classification.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): + Output only. Rows in the confusion matrix. The number of + rows is equal to the size of ``annotation_spec_id``. + ``row[i].example_count[j]`` is the number of examples that + have ground truth of the ``annotation_spec_id[i]`` and are + predicted as ``annotation_spec_id[j]`` by the model being + evaluated. + """ + + class Row(proto.Message): + r"""Output only. A row in the confusion matrix. + + Attributes: + example_count (Sequence[int]): + Output only. Value of the specific cell in the confusion + matrix. The number of values each row has (i.e. the length + of the row) is equal to the length of the + ``annotation_spec_id`` field or, if that one is not + populated, length of the + [display_name][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] + field. + """ + + example_count = proto.RepeatedField(proto.INT32, number=1) + + annotation_spec_id = proto.RepeatedField(proto.STRING, number=1) + + display_name = proto.RepeatedField(proto.STRING, number=3) + + row = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ClassificationEvaluationMetrics.ConfusionMatrix.Row", + ) + + au_prc = proto.Field(proto.FLOAT, number=1) + + au_roc = proto.Field(proto.FLOAT, number=6) + + log_loss = proto.Field(proto.FLOAT, number=7) + + confidence_metrics_entry = proto.RepeatedField( + proto.MESSAGE, number=3, message=ConfidenceMetricsEntry, + ) + + confusion_matrix = proto.Field(proto.MESSAGE, number=4, message=ConfusionMatrix,) + + annotation_spec_id = proto.RepeatedField(proto.STRING, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/data_items.py b/google/cloud/automl_v1/types/data_items.py new file mode 100644 index 00000000..51ccc477 --- /dev/null +++ b/google/cloud/automl_v1/types/data_items.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import geometry +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import text_segment as gca_text_segment + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", + manifest={ + "Image", + "TextSnippet", + "DocumentDimensions", + "Document", + "ExamplePayload", + }, +) + + +class Image(proto.Message): + r"""A representation of an image. + Only images up to 30MB in size are supported. + + Attributes: + image_bytes (bytes): + Image content represented as a stream of bytes. 
+            Note: As
+            with all ``bytes`` fields, protobuffers use a pure binary
+            representation, whereas JSON representations use base64.
+        thumbnail_uri (str):
+            Output only. HTTP URI to the thumbnail image.
+    """
+
+    image_bytes = proto.Field(proto.BYTES, number=1, oneof="data")
+
+    thumbnail_uri = proto.Field(proto.STRING, number=4)
+
+
+class TextSnippet(proto.Message):
+    r"""A representation of a text snippet.
+
+    Attributes:
+        content (str):
+            Required. The content of the text snippet as
+            a string. Up to 250000 characters long.
+        mime_type (str):
+            Optional. The format of
+            [content][google.cloud.automl.v1.TextSnippet.content].
+            Currently the only two allowed values are "text/html" and
+            "text/plain". If left blank, the format is automatically
+            determined from the type of the uploaded
+            [content][google.cloud.automl.v1.TextSnippet.content].
+        content_uri (str):
+            Output only. HTTP URI where you can download
+            the content.
+    """
+
+    content = proto.Field(proto.STRING, number=1)
+
+    mime_type = proto.Field(proto.STRING, number=2)
+
+    content_uri = proto.Field(proto.STRING, number=4)
+
+
+class DocumentDimensions(proto.Message):
+    r"""Message that describes the dimensions of a document.
+
+    Attributes:
+        unit (~.data_items.DocumentDimensions.DocumentDimensionUnit):
+            Unit of the dimension.
+        width (float):
+            Width value of the document, works together
+            with the unit.
+        height (float):
+            Height value of the document, works together
+            with the unit.
+    """
+
+    class DocumentDimensionUnit(proto.Enum):
+        r"""Unit of the document dimension."""
+        DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0
+        INCH = 1
+        CENTIMETER = 2
+        POINT = 3
+
+    unit = proto.Field(proto.ENUM, number=1, enum=DocumentDimensionUnit,)
+
+    width = proto.Field(proto.FLOAT, number=2)
+
+    height = proto.Field(proto.FLOAT, number=3)
+
+
+class Document(proto.Message):
+    r"""A structured text document, e.g. a PDF.
+
+    Attributes:
+        input_config (~.io.DocumentInputConfig):
+            An input config specifying the content of the
+            document.
+        document_text (~.data_items.TextSnippet):
+            The plain text version of this document.
+        layout (Sequence[~.data_items.Document.Layout]):
+            Describes the layout of the document. Sorted by
+            [page_number][].
+        document_dimensions (~.data_items.DocumentDimensions):
+            The dimensions of the page in the document.
+        page_count (int):
+            Number of pages in the document.
+    """
+
+    class Layout(proto.Message):
+        r"""Describes the layout information of a
+        [text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
+        in the document.
+
+        Attributes:
+            text_segment (~.gca_text_segment.TextSegment):
+                Text Segment that represents a segment in
+                [document_text][google.cloud.automl.v1p1beta.Document.document_text].
+            page_number (int):
+                Page number of the
+                [text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
+                in the original document, starts from 1.
+            bounding_poly (~.geometry.BoundingPoly):
+                The position of the
+                [text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
+                in the page. Contains exactly 4
+
+                [normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices]
+                and they are connected by edges in the order provided, which
+                will represent a rectangle parallel to the frame. The
+                [NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex]
+                are relative to the page. Coordinates are based on top-left
+                as point (0,0).
+ text_segment_type (~.data_items.Document.Layout.TextSegmentType): + The type of the + [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] + in document. + """ + + class TextSegmentType(proto.Enum): + r"""The type of TextSegment in the context of the original + document. + """ + TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 + TOKEN = 1 + PARAGRAPH = 2 + FORM_FIELD = 3 + FORM_FIELD_NAME = 4 + FORM_FIELD_CONTENTS = 5 + TABLE = 6 + TABLE_HEADER = 7 + TABLE_ROW = 8 + TABLE_CELL = 9 + + text_segment = proto.Field( + proto.MESSAGE, number=1, message=gca_text_segment.TextSegment, + ) + + page_number = proto.Field(proto.INT32, number=2) + + bounding_poly = proto.Field( + proto.MESSAGE, number=3, message=geometry.BoundingPoly, + ) + + text_segment_type = proto.Field( + proto.ENUM, number=4, enum="Document.Layout.TextSegmentType", + ) + + input_config = proto.Field(proto.MESSAGE, number=1, message=io.DocumentInputConfig,) + + document_text = proto.Field(proto.MESSAGE, number=2, message=TextSnippet,) + + layout = proto.RepeatedField(proto.MESSAGE, number=3, message=Layout,) + + document_dimensions = proto.Field( + proto.MESSAGE, number=4, message=DocumentDimensions, + ) + + page_count = proto.Field(proto.INT32, number=5) + + +class ExamplePayload(proto.Message): + r"""Example data used for training or prediction. + + Attributes: + image (~.data_items.Image): + Example image. + text_snippet (~.data_items.TextSnippet): + Example text. + document (~.data_items.Document): + Example document. + """ + + image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message=Image,) + + text_snippet = proto.Field( + proto.MESSAGE, number=2, oneof="payload", message=TextSnippet, + ) + + document = proto.Field(proto.MESSAGE, number=4, oneof="payload", message=Document,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/dataset.py b/google/cloud/automl_v1/types/dataset.py new file mode 100644 index 00000000..c085c3e6 --- /dev/null +++ b/google/cloud/automl_v1/types/dataset.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import translation +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module(package="google.cloud.automl.v1", manifest={"Dataset",},) + + +class Dataset(proto.Message): + r"""A workspace for solving a single, particular machine learning + (ML) problem. A workspace contains examples that may be + annotated. + + Attributes: + translation_dataset_metadata (~.translation.TranslationDatasetMetadata): + Metadata for a dataset used for translation. + image_classification_dataset_metadata (~.image.ImageClassificationDatasetMetadata): + Metadata for a dataset used for image + classification. 
+ text_classification_dataset_metadata (~.text.TextClassificationDatasetMetadata): + Metadata for a dataset used for text + classification. + image_object_detection_dataset_metadata (~.image.ImageObjectDetectionDatasetMetadata): + Metadata for a dataset used for image object + detection. + text_extraction_dataset_metadata (~.text.TextExtractionDatasetMetadata): + Metadata for a dataset used for text + extraction. + text_sentiment_dataset_metadata (~.text.TextSentimentDatasetMetadata): + Metadata for a dataset used for text + sentiment. + name (str): + Output only. The resource name of the dataset. Form: + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`` + display_name (str): + Required. The name of the dataset to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. + description (str): + User-provided description of the dataset. The + description can be up to 25000 characters long. + example_count (int): + Output only. The number of examples in the + dataset. + create_time (~.timestamp.Timestamp): + Output only. Timestamp when this dataset was + created. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Sequence[~.dataset.Dataset.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your dataset. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. Label values are optional. Label + keys must start with a letter. + See https://goo.gl/xmQnxf for more information + on and examples of labels. 
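A hedged sketch of constructing one of these oneof-style datasets with the v1 client follows; the project ID and display name are placeholders, and ``create_dataset`` is assumed to return a long-running operation as it does in the v1 surface:

```py
# Hedged sketch: creating a text classification Dataset via AutoMlClient.
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
parent = "projects/my-project/locations/us-central1"  # placeholder
dataset = automl_v1.Dataset(
    display_name="my_training_data",
    text_classification_dataset_metadata=automl_v1.TextClassificationDatasetMetadata(
        classification_type=automl_v1.ClassificationType.MULTICLASS
    ),
)
operation = client.create_dataset(parent=parent, dataset=dataset)
created = operation.result(timeout=300)  # block until the LRO completes
print(created.name)
```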
+ """ + + translation_dataset_metadata = proto.Field( + proto.MESSAGE, + number=23, + oneof="dataset_metadata", + message=translation.TranslationDatasetMetadata, + ) + + image_classification_dataset_metadata = proto.Field( + proto.MESSAGE, + number=24, + oneof="dataset_metadata", + message=image.ImageClassificationDatasetMetadata, + ) + + text_classification_dataset_metadata = proto.Field( + proto.MESSAGE, + number=25, + oneof="dataset_metadata", + message=text.TextClassificationDatasetMetadata, + ) + + image_object_detection_dataset_metadata = proto.Field( + proto.MESSAGE, + number=26, + oneof="dataset_metadata", + message=image.ImageObjectDetectionDatasetMetadata, + ) + + text_extraction_dataset_metadata = proto.Field( + proto.MESSAGE, + number=28, + oneof="dataset_metadata", + message=text.TextExtractionDatasetMetadata, + ) + + text_sentiment_dataset_metadata = proto.Field( + proto.MESSAGE, + number=30, + oneof="dataset_metadata", + message=text.TextSentimentDatasetMetadata, + ) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + example_count = proto.Field(proto.INT32, number=21) + + create_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,) + + etag = proto.Field(proto.STRING, number=17) + + labels = proto.MapField(proto.STRING, proto.STRING, number=39) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/detection.py b/google/cloud/automl_v1/types/detection.py new file mode 100644 index 00000000..6eab690c --- /dev/null +++ b/google/cloud/automl_v1/types/detection.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import geometry + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", + manifest={ + "ImageObjectDetectionAnnotation", + "BoundingBoxMetricsEntry", + "ImageObjectDetectionEvaluationMetrics", + }, +) + + +class ImageObjectDetectionAnnotation(proto.Message): + r"""Annotation details for image object detection. + + Attributes: + bounding_box (~.geometry.BoundingPoly): + Output only. The rectangle representing the + object location. + score (float): + Output only. The confidence that this annotation is positive + for the parent example, value in [0, 1], higher means higher + positivity confidence. + """ + + bounding_box = proto.Field(proto.MESSAGE, number=1, message=geometry.BoundingPoly,) + + score = proto.Field(proto.FLOAT, number=2) + + +class BoundingBoxMetricsEntry(proto.Message): + r"""Bounding box matching model metrics for a single + intersection-over-union threshold and multiple label match + confidence thresholds. + + Attributes: + iou_threshold (float): + Output only. The intersection-over-union + threshold value used to compute this metrics + entry. + mean_average_precision (float): + Output only. 
The mean average precision, most often close to + au_prc. + confidence_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): + Output only. Metrics for each label-match + confidence_threshold from + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall + curve is derived from them. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. + recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + recall = proto.Field(proto.FLOAT, number=2) + + precision = proto.Field(proto.FLOAT, number=3) + + f1_score = proto.Field(proto.FLOAT, number=4) + + iou_threshold = proto.Field(proto.FLOAT, number=1) + + mean_average_precision = proto.Field(proto.FLOAT, number=2) + + confidence_metrics_entries = proto.RepeatedField( + proto.MESSAGE, number=3, message=ConfidenceMetricsEntry, + ) + + +class ImageObjectDetectionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. + + Attributes: + evaluated_bounding_box_count (int): + Output only. The total number of bounding + boxes (i.e. summed over all images) the ground + truth used to create this evaluation had. + bounding_box_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry]): + Output only. The bounding boxes match metrics + for each Intersection-over-union threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each + label confidence threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + bounding_box_mean_average_precision (float): + Output only. The single metric for bounding boxes + evaluation: the mean_average_precision averaged over all + bounding_box_metrics_entries. + """ + + evaluated_bounding_box_count = proto.Field(proto.INT32, number=1) + + bounding_box_metrics_entries = proto.RepeatedField( + proto.MESSAGE, number=2, message=BoundingBoxMetricsEntry, + ) + + bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/geometry.py b/google/cloud/automl_v1/types/geometry.py new file mode 100644 index 00000000..f459ca52 --- /dev/null +++ b/google/cloud/automl_v1/types/geometry.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", manifest={"NormalizedVertex", "BoundingPoly",}, +) + + +class NormalizedVertex(proto.Message): + r"""A vertex represents a 2D point in the image. 
+    The normalized vertex coordinates are fractions between 0 and 1,
+    relative to the original plane (image, video). E.g., if the plane
+    (e.g. the whole image) has size 10 x 20, then a point with
+    normalized coordinates (0.1, 0.3) is at the position (1, 6) on
+    that plane.
+
+    Attributes:
+        x (float):
+            Required. Horizontal coordinate.
+        y (float):
+            Required. Vertical coordinate.
+    """
+
+    x = proto.Field(proto.FLOAT, number=1)
+
+    y = proto.Field(proto.FLOAT, number=2)
+
+
+class BoundingPoly(proto.Message):
+    r"""A bounding polygon of a detected object on a plane. On output both
+    vertices and normalized_vertices are provided. The polygon is formed
+    by connecting vertices in the order they are listed.
+
+    Attributes:
+        normalized_vertices (Sequence[~.geometry.NormalizedVertex]):
+            Output only. The bounding polygon's normalized
+            vertices.
+    """
+
+    normalized_vertices = proto.RepeatedField(
+        proto.MESSAGE, number=2, message=NormalizedVertex,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/image.py b/google/cloud/automl_v1/types/image.py
new file mode 100644
index 00000000..d025fdc9
--- /dev/null
+++ b/google/cloud/automl_v1/types/image.py
@@ -0,0 +1,282 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1.types import classification
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1",
+    manifest={
+        "ImageClassificationDatasetMetadata",
+        "ImageObjectDetectionDatasetMetadata",
+        "ImageClassificationModelMetadata",
+        "ImageObjectDetectionModelMetadata",
+        "ImageClassificationModelDeploymentMetadata",
+        "ImageObjectDetectionModelDeploymentMetadata",
+    },
+)
+
+
+class ImageClassificationDatasetMetadata(proto.Message):
+    r"""Dataset metadata that is specific to image classification.
+
+    Attributes:
+        classification_type (~.classification.ClassificationType):
+            Required. Type of the classification problem.
+    """
+
+    classification_type = proto.Field(
+        proto.ENUM, number=1, enum=classification.ClassificationType,
+    )
+
+
+class ImageObjectDetectionDatasetMetadata(proto.Message):
+    r"""Dataset metadata specific to image object detection."""
+
+
+class ImageClassificationModelMetadata(proto.Message):
+    r"""Model metadata for image classification.
+
+    Attributes:
+        base_model_id (str):
+            Optional. The ID of the ``base`` model. If it is specified,
+            the new model will be created based on the ``base`` model.
+            Otherwise, the new model will be created from scratch. The
+            ``base`` model must be in the same ``project`` and
+            ``location`` as the new model to create, and have the same
+            ``model_type``.
+        train_budget_milli_node_hours (int):
+            The train budget of creating this model, expressed in milli
+            node hours, i.e. a value of 1,000 in this field means 1 node
+            hour. The actual ``train_cost`` will be equal to or less
+            than this value.
+            If further model training ceases to provide any
+            improvements, it will stop without using the full budget and
+            the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour
+            = actual_hour \* number_of_nodes_involved. For model type
+            ``cloud``\ (default), the train budget must be between 8,000
+            and 800,000 milli node hours, inclusive. The default value
+            is 192,000, which represents one day in wall time. For model
+            type ``mobile-low-latency-1``, ``mobile-versatile-1``,
+            ``mobile-high-accuracy-1``,
+            ``mobile-core-ml-low-latency-1``,
+            ``mobile-core-ml-versatile-1``,
+            ``mobile-core-ml-high-accuracy-1``, the train budget must be
+            between 1,000 and 100,000 milli node hours, inclusive. The
+            default value is 24,000, which represents one day in wall
+            time.
+        train_cost_milli_node_hours (int):
+            Output only. The actual train cost of
+            creating this model, expressed in milli node
+            hours, i.e. a value of 1,000 in this field means
+            1 node hour. Guaranteed to not exceed the train
+            budget.
+        stop_reason (str):
+            Output only. The reason that this create model operation
+            stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``.
+        model_type (str):
+            Optional. Type of the model. The available values are:
+
+            -  ``cloud`` - Model to be used via prediction calls to
+               AutoML API. This is the default value.
+            -  ``mobile-low-latency-1`` - A model that, in addition to
+               providing prediction via AutoML API, can also be exported
+               (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile or edge device with TensorFlow
+               afterwards. Expected to have low latency, but may have
+               lower prediction quality than other models.
+            -  ``mobile-versatile-1`` - A model that, in addition to
+               providing prediction via AutoML API, can also be exported
+               (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile or edge device with TensorFlow
+               afterwards.
+            -  ``mobile-high-accuracy-1`` - A model that, in addition to
+               providing prediction via AutoML API, can also be exported
+               (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile or edge device with TensorFlow
+               afterwards. Expected to have a higher latency, but should
+               also have a higher prediction quality than other models.
+            -  ``mobile-core-ml-low-latency-1`` - A model that, in
+               addition to providing prediction via AutoML API, can also
+               be exported (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile device with Core ML afterwards.
+               Expected to have low latency, but may have lower
+               prediction quality than other models.
+            -  ``mobile-core-ml-versatile-1`` - A model that, in
+               addition to providing prediction via AutoML API, can also
+               be exported (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile device with Core ML afterwards.
+            -  ``mobile-core-ml-high-accuracy-1`` - A model that, in
+               addition to providing prediction via AutoML API, can also
+               be exported (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile device with Core ML afterwards.
+               Expected to have a higher latency, but should also have a
+               higher prediction quality than other models.
+        node_qps (float):
+            Output only. An approximate number of online
+            prediction QPS that can be supported by this
+            model per node on which it is deployed.
+        node_count (int):
+            Output only. The number of nodes this model is deployed on.
+            A node is an abstraction of a machine resource, which can
+            handle online prediction QPS as given in the node_qps field.
+    """
+
+    base_model_id = proto.Field(proto.STRING, number=1)
+
+    train_budget_milli_node_hours = proto.Field(proto.INT64, number=16)
+
+    train_cost_milli_node_hours = proto.Field(proto.INT64, number=17)
+
+    stop_reason = proto.Field(proto.STRING, number=5)
+
+    model_type = proto.Field(proto.STRING, number=7)
+
+    node_qps = proto.Field(proto.DOUBLE, number=13)
+
+    node_count = proto.Field(proto.INT64, number=14)
+
+
+class ImageObjectDetectionModelMetadata(proto.Message):
+    r"""Model metadata specific to image object detection.
+
+    Attributes:
+        model_type (str):
+            Optional. Type of the model. The available values are:
+
+            -  ``cloud-high-accuracy-1`` - (default) A model to be used
+               via prediction calls to AutoML API. Expected to have a
+               higher latency, but should also have a higher prediction
+               quality than other models.
+            -  ``cloud-low-latency-1`` - A model to be used via
+               prediction calls to AutoML API. Expected to have low
+               latency, but may have lower prediction quality than other
+               models.
+            -  ``mobile-low-latency-1`` - A model that, in addition to
+               providing prediction via AutoML API, can also be exported
+               (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile or edge device with TensorFlow
+               afterwards. Expected to have low latency, but may have
+               lower prediction quality than other models.
+            -  ``mobile-versatile-1`` - A model that, in addition to
+               providing prediction via AutoML API, can also be exported
+               (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile or edge device with TensorFlow
+               afterwards.
+            -  ``mobile-high-accuracy-1`` - A model that, in addition to
+               providing prediction via AutoML API, can also be exported
+               (see
+               [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel])
+               and used on a mobile or edge device with TensorFlow
+               afterwards. Expected to have a higher latency, but should
+               also have a higher prediction quality than other models.
+        node_count (int):
+            Output only. The number of nodes this model is deployed on.
+            A node is an abstraction of a machine resource, which can
+            handle online prediction QPS as given in the qps_per_node
+            field.
+        node_qps (float):
+            Output only. An approximate number of online
+            prediction QPS that can be supported by this
+            model per node on which it is deployed.
+        stop_reason (str):
+            Output only. The reason that this create model operation
+            stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``.
+        train_budget_milli_node_hours (int):
+            The train budget of creating this model, expressed in milli
+            node hours, i.e. a value of 1,000 in this field means 1 node
+            hour. The actual ``train_cost`` will be equal to or less
+            than this value. If further model training ceases to provide
+            any improvements, it will stop without using the full budget
+            and the stop_reason will be ``MODEL_CONVERGED``. Note,
+            node_hour = actual_hour \* number_of_nodes_involved. For
+            model type ``cloud-high-accuracy-1``\ (default) and
+            ``cloud-low-latency-1``, the train budget must be between
+            20,000 and 900,000 milli node hours, inclusive. The default
+            value is 216,000, which represents one day in wall time.
+            For model type ``mobile-low-latency-1``,
+            ``mobile-versatile-1``, ``mobile-high-accuracy-1``,
+            ``mobile-core-ml-low-latency-1``,
+            ``mobile-core-ml-versatile-1``,
+            ``mobile-core-ml-high-accuracy-1``, the train budget must be
+            between 1,000 and 100,000 milli node hours, inclusive. The
+            default value is 24,000, which represents one day in wall
+            time.
+        train_cost_milli_node_hours (int):
+            Output only. The actual train cost of
+            creating this model, expressed in milli node
+            hours, i.e. a value of 1,000 in this field means
+            1 node hour. Guaranteed to not exceed the train
+            budget.
+    """
+
+    model_type = proto.Field(proto.STRING, number=1)
+
+    node_count = proto.Field(proto.INT64, number=3)
+
+    node_qps = proto.Field(proto.DOUBLE, number=4)
+
+    stop_reason = proto.Field(proto.STRING, number=5)
+
+    train_budget_milli_node_hours = proto.Field(proto.INT64, number=6)
+
+    train_cost_milli_node_hours = proto.Field(proto.INT64, number=7)
+
+
+class ImageClassificationModelDeploymentMetadata(proto.Message):
+    r"""Model deployment metadata specific to Image Classification.
+
+    Attributes:
+        node_count (int):
+            Input only. The number of nodes to deploy the model on. A
+            node is an abstraction of a machine resource, which can
+            handle online prediction QPS as given in the model's
+
+            [node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps].
+            Must be between 1 and 100, inclusive on both ends.
+    """
+
+    node_count = proto.Field(proto.INT64, number=1)
+
+
+class ImageObjectDetectionModelDeploymentMetadata(proto.Message):
+    r"""Model deployment metadata specific to Image Object Detection.
+
+    Attributes:
+        node_count (int):
+            Input only. The number of nodes to deploy the model on. A
+            node is an abstraction of a machine resource, which can
+            handle online prediction QPS as given in the model's
+
+            [qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node].
+            Must be between 1 and 100, inclusive on both ends.
+    """
+
+    node_count = proto.Field(proto.INT64, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/io.py b/google/cloud/automl_v1/types/io.py
new file mode 100644
index 00000000..34eee669
--- /dev/null
+++ b/google/cloud/automl_v1/types/io.py
@@ -0,0 +1,1556 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1",
+    manifest={
+        "InputConfig",
+        "BatchPredictInputConfig",
+        "DocumentInputConfig",
+        "OutputConfig",
+        "BatchPredictOutputConfig",
+        "ModelExportOutputConfig",
+        "GcsSource",
+        "GcsDestination",
+    },
+)
+
+
+class InputConfig(proto.Message):
+    r"""Input configuration for
+    [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]
+    action.
+
+    The format of the input depends on the dataset_metadata of the
+    Dataset into which the import is happening.
As input source the
+ [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is
+ expected, unless specified otherwise. Additionally, any input .CSV
+ file by itself must be 100MB or smaller, unless specified otherwise.
+ If an "example" file (that is, image, video etc.) with identical
+ content (even if it had a different ``GCS_FILE_PATH``) is mentioned
+ multiple times, then its label, bounding boxes etc. are appended.
+ The same file should always be provided with the same ``ML_USE`` and
+ ``GCS_FILE_PATH``; if it is not, these values are
+ nondeterministically selected from the given ones.
+
+ The formats are represented in EBNF with commas being literal and
+ with non-terminal symbols defined near the end of this comment. The
+ formats are:
+
+ .. raw:: html
+

+ <h4>AutoML Vision</h4>

+ + .. raw:: html + +
+ <h5>Classification</h5>
+
+
+ See `Preparing your training
+ data <https://cloud.google.com/vision/automl/docs/prepare>`__ for
+ more information.
+
+ CSV file(s) with each line in format:
+
+ ::
+
+     ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
+
+ -  ``ML_USE`` - Identifies the data set that the current row (file)
+    applies to. This value can be one of the following:
+
+    -  ``TRAIN`` - Rows in this file are used to train the model.
+    -  ``TEST`` - Rows in this file are used to test the model during
+       training.
+    -  ``UNASSIGNED`` - Rows in this file are not categorized. They
+       are automatically divided into train and test data, 80% for
+       training and 20% for testing.
+
+ -  ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image
+    of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG,
+    .WEBP, .BMP, .TIFF, .ICO.
+
+ -  ``LABEL`` - A label that identifies the object in the image.
+
+ For the ``MULTICLASS`` classification type, at most one ``LABEL`` is
+ allowed per image. If an image has not yet been labeled, then it
+ should be mentioned just once with no ``LABEL``.
+
+ Some sample rows:
+
+ ::
+
+     TRAIN,gs://folder/image1.jpg,daisy
+     TEST,gs://folder/image2.jpg,dandelion,tulip,rose
+     UNASSIGNED,gs://folder/image3.jpg,daisy
+     UNASSIGNED,gs://folder/image4.jpg
+
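+ As a sketch, such a CSV can be imported with the client library; the
+ project, location, dataset ID and bucket path below are placeholders::
+
+     from google.cloud import automl
+
+     client = automl.AutoMlClient()
+     dataset_path = client.dataset_path("my-project", "us-central1", "my-dataset-id")
+     input_config = automl.InputConfig(
+         gcs_source=automl.GcsSource(input_uris=["gs://folder/train.csv"])
+     )
+     # ImportData is a long-running operation; wait for it to finish.
+     client.import_data(name=dataset_path, input_config=input_config).result()
+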
+ .. raw:: html
+
+ <h5>Object Detection</h5>
+
+ See [Preparing your training
+ data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
+ for more information.
+
+ CSV file(s) with each line in format:
+
+ ::
+
+     ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
+
+ -  ``ML_USE`` - Identifies the data set that the current row (file)
+    applies to. This value can be one of the following:
+
+    -  ``TRAIN`` - Rows in this file are used to train the model.
+    -  ``TEST`` - Rows in this file are used to test the model during
+       training.
+    -  ``UNASSIGNED`` - Rows in this file are not categorized. They
+       are automatically divided into train and test data, 80% for
+       training and 20% for testing.
+
+ -  ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image
+    of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
+    Each image is assumed to be exhaustively labeled.
+
+ -  ``LABEL`` - A label that identifies the object in the image
+    specified by the ``BOUNDING_BOX``.
+
+ -  ``BOUNDING_BOX`` - The vertices of an object in the example
+    image. The minimum allowed ``BOUNDING_BOX`` edge length is 0.01,
+    and no more than 500 ``BOUNDING_BOX`` instances per image are
+    allowed (one ``BOUNDING_BOX`` per line). If an image has none of
+    the objects being looked for, it should be mentioned just once
+    with no LABEL and ",,,,,,," in place of the ``BOUNDING_BOX``.
+
+ **Four sample rows:**
+
+ ::
+
+     TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
+     TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
+     UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
+     TEST,gs://folder/im3.png,,,,,,,,,
+
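+ As a sketch, rows in this layout can be produced with the standard
+ ``csv`` module (file names, paths and labels are illustrative)::
+
+     import csv
+
+     rows = [
+         # ML_USE, GCS_FILE_PATH, LABEL, then 8 bounding-box fields.
+         ("TRAIN", "gs://folder/image1.png", "car", 0.1, 0.1, "", "", 0.3, 0.3, "", ""),
+         # An image with no objects: empty LABEL and empty bounding box.
+         ("TEST", "gs://folder/im3.png", "", "", "", "", "", "", "", "", ""),
+     ]
+     with open("object_detection.csv", "w", newline="") as f:
+         csv.writer(f).writerows(rows)
+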
+
+ + .. raw:: html + +

+ <h4>AutoML Video Intelligence</h4>

+ + .. raw:: html + +
+ <h5>Classification</h5>
+
+ See `Preparing your training
+ data <https://cloud.google.com/video-intelligence/automl/docs/prepare>`__
+ for more information.
+
+ CSV file(s) with each line in format:
+
+ ::
+
+     ML_USE,GCS_FILE_PATH
+
+ For ``ML_USE``, do not use ``VALIDATE``.
+
+ ``GCS_FILE_PATH`` is the path to another .csv file that describes
+ training examples for a given ``ML_USE``, using the following row
+ format:
+
+ ::
+
+     GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
+
+ Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up
+ to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+
+ ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the
+ length of the video, and the end time must be after the start time.
+ Any segment of a video which has one or more labels on it is
+ considered a hard negative for all other labels. Any segment with no
+ labels on it is considered to be unknown. If a whole video is
+ unknown, then it should be mentioned just once with ",," in place of
+ ``LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END``.
+
+ Sample top level CSV file:
+
+ ::
+
+     TRAIN,gs://folder/train_videos.csv
+     TEST,gs://folder/test_videos.csv
+     UNASSIGNED,gs://folder/other_videos.csv
+
+ Sample rows of a CSV file for a particular ML_USE:
+
+ ::
+
+     gs://folder/video1.avi,car,120,180.000021
+     gs://folder/video1.avi,bike,150,180.000021
+     gs://folder/vid2.avi,car,0,60.5
+     gs://folder/vid3.avi,,,
+
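+ A sketch of generating this two-level layout with the standard ``csv``
+ module (bucket paths are placeholders)::
+
+     import csv
+
+     # Top-level CSV: one row per ML_USE, pointing at per-split CSVs.
+     with open("videos.csv", "w", newline="") as f:
+         csv.writer(f).writerows([
+             ("TRAIN", "gs://folder/train_videos.csv"),
+             ("TEST", "gs://folder/test_videos.csv"),
+         ])
+
+     # Per-split CSV: video path, label, segment start and end (seconds).
+     with open("train_videos.csv", "w", newline="") as f:
+         csv.writer(f).writerows([
+             ("gs://folder/video1.avi", "car", 120, 180.000021),
+         ])
+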
+ .. raw:: html
+
+ <h5>Object Tracking</h5>
+
+ + See `Preparing your training + data `__ + for more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH + + For ``ML_USE``, do not use ``VALIDATE``. + + ``GCS_FILE_PATH`` is the path to another .csv file that describes + training example for a given ``ML_USE``, using the following row + format: + + :: + + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX + + or + + :: + + GCS_FILE_PATH,,,,,,,,,, + + Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up + to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + Providing ``INSTANCE_ID``\ s can help to obtain a better model. When + a specific labeled entity leaves the video frame, and shows up + afterwards it is not required, albeit preferable, that the same + ``INSTANCE_ID`` is given to it. + + ``TIMESTAMP`` must be within the length of the video, the + ``BOUNDING_BOX`` is assumed to be drawn on the closest video's frame + to the ``TIMESTAMP``. Any mentioned by the ``TIMESTAMP`` frame is + expected to be exhaustively labeled and no more than 500 + ``BOUNDING_BOX``-es per frame are allowed. If a whole video is + unknown, then it should be mentioned just once with ",,,,,,,,,," in + place of ``LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. + + Sample top level CSV file: + + :: + + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv + + Seven sample rows of a CSV file for a particular ML_USE: + + :: + + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, + + .. raw:: html + +
+
+ + .. raw:: html + +

+ <h4>AutoML Natural Language</h4>

+ + .. raw:: html + +
+ <h5>Entity Extraction</h5>
+ + See `Preparing your training + data `__ for + more information. + + One or more CSV file(s) with each line in the following format: + + :: + + ML_USE,GCS_FILE_PATH + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing.. + + - ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored + in Google Cloud Storage that contains in-line text in-line as + documents for model training. + + After the training data set has been determined from the ``TRAIN`` + and ``UNASSIGNED`` CSV files, the training data is divided into + train and validation data sets. 70% for training and 30% for + validation. + + For example: + + :: + + TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl + TEST,gs://folder/file3.jsonl + + **In-line JSONL files** + + In-line .JSONL files contain, per line, a JSON document that wraps a + [``text_snippet``][google.cloud.automl.v1.TextSnippet] field + followed by one or more + [``annotations``][google.cloud.automl.v1.AnnotationPayload] fields, + which have ``display_name`` and ``text_extraction`` fields to + describe the entity from the text snippet. Multiple JSON documents + can be separated using line breaks (\n). + + The supplied text must be annotated exhaustively. For example, if + you include the text "horse", but do not label it as "animal", then + "horse" is assumed to not be an "animal". + + Any given text snippet content must have 30,000 characters or less, + and also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC + encoded. + + For example: + + :: + + { + "text_snippet": { + "content": "dog car cat" + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 0, "end_offset": 2} + } + }, + { + "display_name": "vehicle", + "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 6} + } + }, + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 8, "end_offset": 10} + } + } + ] + }\n + { + "text_snippet": { + "content": "This dog is good." + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 7} + } + } + ] + } + + **JSONL files that reference documents** + + .JSONL files contain, per line, a JSON document that wraps a + ``input_config`` that contains the path to a source document. + Multiple JSON documents can be separated using line breaks (\n). + + Supported document extensions: .PDF, .TIF, .TIFF + + For example: + + :: + + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } + } + } + } + + **In-line JSONL files with document layout information** + + **Note:** You can only annotate documents using the UI. The format + described below applies to annotated documents exported using the UI + or ``exportData``. + + In-line .JSONL files for documents contain, per line, a JSON + document that wraps a ``document`` field that provides the textual + content of the document and the layout information. 
+ + For example: + + :: + + { + "document": { + "document_text": { + "content": "dog car cat" + } + "layout": [ + { + "text_segment": { + "start_offset": 0, + "end_offset": 11, + }, + "page_number": 1, + "bounding_poly": { + "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, + ], + }, + "text_segment_type": TOKEN, + } + ], + "document_dimensions": { + "width": 8.27, + "height": 11.69, + "unit": INCH, + } + "page_count": 3, + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 0, "end_offset": 3} + } + }, + { + "display_name": "vehicle", + "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 7} + } + }, + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 8, "end_offset": 11} + } + }, + ], + + .. raw:: html + +
+ <h5>Classification</h5>
+ + See `Preparing your training + data `__ + for more information. + + One or more CSV file(s) with each line in the following format: + + :: + + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing. + + - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a + pattern. If the column content is a valid Google Cloud Storage + file path, that is, prefixed by "gs://", it is treated as a + ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in + double quotes (""), it is treated as a ``TEXT_SNIPPET``. For + ``GCS_FILE_PATH``, the path must lead to a file with supported + extension and UTF-8 encoding, for example, + "gs://folder/content.txt" AutoML imports the file content as a + text snippet. For ``TEXT_SNIPPET``, AutoML imports the column + content excluding quotes. In both cases, size of the content must + be 10MB or less in size. For zip files, the size of each file + inside the zip must be 10MB or less in size. + + For the ``MULTICLASS`` classification type, at most one ``LABEL`` + is allowed. + + The ``ML_USE`` and ``LABEL`` columns are optional. Supported file + extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP + + A maximum of 100 unique labels are allowed per CSV row. + + Sample rows: + + :: + + TRAIN,"They have bad food and very rude",RudeService,BadFood + gs://folder/content.txt,SlowService + TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,BadFood + + .. raw:: html + +
+ <h5>Sentiment Analysis</h5>
+ + See `Preparing your training + data `__ + for more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing. + + - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a + pattern. If the column content is a valid Google Cloud Storage + file path, that is, prefixed by "gs://", it is treated as a + ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in + double quotes (""), it is treated as a ``TEXT_SNIPPET``. For + ``GCS_FILE_PATH``, the path must lead to a file with supported + extension and UTF-8 encoding, for example, + "gs://folder/content.txt" AutoML imports the file content as a + text snippet. For ``TEXT_SNIPPET``, AutoML imports the column + content excluding quotes. In both cases, size of the content must + be 128kB or less in size. For zip files, the size of each file + inside the zip must be 128kB or less in size. + + The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported + file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP + + - ``SENTIMENT`` - An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max + (inclusive). Describes the ordinal of the sentiment - higher + value means a more positive sentiment. All the values are + completely relative, i.e. neither 0 needs to mean a negative or + neutral sentiment nor sentiment_max needs to mean a positive one + - it is just required that 0 is the least positive sentiment in + the data, and sentiment_max is the most positive one. The + SENTIMENT shouldn't be confused with "score" or "magnitude" from + the previous Natural Language Sentiment Analysis API. All + SENTIMENT values between 0 and sentiment_max must be represented + in the imported data. On prediction the same 0 to sentiment_max + range will be used. The difference between neighboring sentiment + values needs not to be uniform, e.g. 1 and 2 may be similar + whereas the difference between 2 and 3 may be large. + + Sample rows: + + :: + + TRAIN,"@freewrytin this is way too good for your product",2 + gs://folder/content.txt,3 + TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,2 + + .. raw:: html + +
+
+ + .. raw:: html + +

+ <h4>AutoML Tables</h4>

+
+ See `Preparing your training
+ data <https://cloud.google.com/automl-tables/docs/prepare>`__ for
+ more information.
+
+ You can use either
+ [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
+ [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
+ All input is concatenated into a single
+
+ [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id].
+
+ **For gcs_source:**
+
+ CSV file(s), where the first row of the first file is the header,
+ containing unique column names. If the first row of a subsequent
+ file is the same as the header, then it is also treated as a header.
+ All other rows contain values for the corresponding columns.
+
+ Each .CSV file by itself must be 10GB or smaller, and their total
+ size must be 100GB or smaller.
+
+ First three sample rows of a CSV file:
+
+ .. raw:: html
+
+        "Id","First Name","Last Name","Dob","Addresses"
+
+        "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+
+        "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+        
+
+ **For bigquery_source:**
+
+ A URI of a BigQuery table. The user data size of the BigQuery table
+ must be 100GB or smaller.
+
+ An imported table must have between 2 and 1,000 columns, inclusive,
+ and between 1,000 and 100,000,000 rows, inclusive. At most 5 import
+ data operations can run in parallel.
+
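+ A sketch of a Tables import from Google Cloud Storage, including the
+ ``schema_inference_version`` parameter described under ``params``
+ below (resource names and paths are placeholders)::
+
+     from google.cloud import automl
+
+     client = automl.AutoMlClient()
+     input_config = automl.InputConfig(
+         gcs_source=automl.GcsSource(input_uris=["gs://folder/tables.csv"]),
+         params={"schema_inference_version": "1"},
+     )
+     client.import_data(
+         name=client.dataset_path("my-project", "us-central1", "my-dataset-id"),
+         input_config=input_config,
+     ).result()
+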
+
+
+ **Input field definitions:**
+
+ ``ML_USE`` : ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED")
+ Describes how the given example (file) should be used for model
+ training. "UNASSIGNED" can be used when the user has no preference.
+
+ ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For
+ example, "gs://folder/image1.png".
+
+ ``LABEL`` : A display name of an object on an image, video etc.,
+ e.g. "dog". Must be up to 32 characters long and can consist only of
+ ASCII Latin letters A-Z and a-z, underscores (_), and ASCII digits
+ 0-9. For each label an AnnotationSpec is created whose display_name
+ becomes the label; AnnotationSpecs are given back in predictions.
+
+ ``INSTANCE_ID`` : A positive integer that identifies a specific
+ instance of a labeled entity on an example. Used e.g. to track two
+ cars on a video while being able to tell apart which one is which.
+
+ ``BOUNDING_BOX`` : (``VERTEX,VERTEX,VERTEX,VERTEX`` \|
+ ``VERTEX,,,VERTEX,,``) A rectangle parallel to the frame of the
+ example (image, video). If 4 vertices are given they are connected
+ by edges in the order provided; if 2 are given they are recognized
+ as diagonally opposite vertices of the rectangle.
+
+ ``VERTEX`` : (``COORDINATE,COORDINATE``) First coordinate is
+ horizontal (x), the second is vertical (y).
+
+ ``COORDINATE`` : A float in the 0 to 1 range, relative to the total
+ length of the image or video in the given dimension. For fractions
+ the leading non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0
+ is in the top left.
+
+ ``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning,
+ inclusive, of a time segment within an example that has a time
+ dimension (e.g. video).
+
+ ``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end,
+ exclusive, of a time segment within an example that has a time
+ dimension (e.g. video).
+
+ ``TIME_OFFSET`` : A number of seconds as measured from the start of
+ an example (e.g. video). Fractions are allowed, up to a microsecond
+ precision. "inf" is allowed, and it means the end of the example.
+
+ ``TEXT_SNIPPET`` : The content of a text snippet, UTF-8 encoded,
+ enclosed within double quotes ("").
+
+ ``DOCUMENT`` : A field that provides the textual content of the
+ document and the layout information.
+
+ **Errors:**
+
+ If any of the provided CSV files can't be parsed or if more than a
+ certain percent of CSV rows cannot be processed then the operation
+ fails and nothing is imported. Regardless of overall success or
+ failure, the per-row failures, up to a certain count cap, are listed
+ in Operation.metadata.partial_failures.
+
+ Attributes:
+     gcs_source (~.io.GcsSource):
+         The Google Cloud Storage location for the input content. For
+         [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData],
+         ``gcs_source`` points to a CSV file with a structure
+         described in
+         [InputConfig][google.cloud.automl.v1.InputConfig].
+     params (Sequence[~.io.InputConfig.ParamsEntry]):
+         Additional domain-specific parameters describing the
+         semantic of the imported data; any string must be up to
+         25,000 characters long.
+
+ .. raw:: html
+

+ <h4>AutoML Tables</h4>

+ + ``schema_inference_version`` : (integer) This value must be + supplied. The version of the algorithm to use for the + initial inference of the column data types of the imported + table. Allowed values: "1". + """ + + gcs_source = proto.Field( + proto.MESSAGE, number=1, oneof="source", message="GcsSource", + ) + + params = proto.MapField(proto.STRING, proto.STRING, number=2) + + +class BatchPredictInputConfig(proto.Message): + r"""Input configuration for BatchPredict Action. + + The format of input depends on the ML problem of the model used for + prediction. As input source the + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is + expected, unless specified otherwise. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + .. raw:: html + +

+ <h4>AutoML Vision</h4>

+
+ <h5>Classification</h5>
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + The Google Cloud Storage location of an image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the + ID in the batch predict output. + + Sample rows: + + :: + + gs://folder/image1.jpeg + gs://folder/image2.gif + gs://folder/image3.png + + .. raw:: html + +
+ <h5>Object Detection</h5>
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + The Google Cloud Storage location of an image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the + ID in the batch predict output. + + Sample rows: + + :: + + gs://folder/image1.jpeg + gs://folder/image2.gif + gs://folder/image3.png + + .. raw:: html + +
+
+ + .. raw:: html + +

+ <h4>AutoML Video Intelligence</h4>

+
+ <h5>Classification</h5>
+
+ One or more CSV files where each line is in the following format:
+
+ ::
+
+     GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+
+ ``GCS_FILE_PATH`` is the Google Cloud Storage location of a video up
+ to 50GB in size and up to 3h in duration. Supported extensions:
+ .MOV, .MPEG4, .MP4, .AVI.
+
+ ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the
+ length of the video, and the end time must be after the start time.
+
+ Sample rows:
+
+ ::
+
+     gs://folder/video1.mp4,10,40
+     gs://folder/video1.mp4,20,60
+     gs://folder/vid2.mov,0,inf
+
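+ A batch prediction call over such a CSV might look like the following
+ sketch (resource names and paths are placeholders)::
+
+     from google.cloud import automl
+
+     client = automl.PredictionServiceClient()
+     model_path = client.model_path("my-project", "us-central1", "my-model-id")
+     operation = client.batch_predict(
+         name=model_path,
+         input_config=automl.BatchPredictInputConfig(
+             gcs_source=automl.GcsSource(input_uris=["gs://folder/videos.csv"])
+         ),
+         output_config=automl.BatchPredictOutputConfig(
+             gcs_destination=automl.GcsDestination(
+                 output_uri_prefix="gs://folder/output/"
+             )
+         ),
+     )
+     operation.result()  # Block until the batch prediction finishes.
+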
+ .. raw:: html
+
+ <h5>Object Tracking</h5>
+
+
+ One or more CSV files where each line is in the following format:
+
+ ::
+
+     GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
+
+ ``GCS_FILE_PATH`` is the Google Cloud Storage location of a video up
+ to 50GB in size and up to 3h in duration. Supported extensions:
+ .MOV, .MPEG4, .MP4, .AVI.
+
+ ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the
+ length of the video, and the end time must be after the start time.
+
+ Sample rows:
+
+ ::
+
+     gs://folder/video1.mp4,10,40
+     gs://folder/video1.mp4,20,60
+     gs://folder/vid2.mov,0,inf
+
+
+ + .. raw:: html + +

+ <h4>AutoML Natural Language</h4>

+
+ <h5>Classification</h5>
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text + file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF + + Text files can be no larger than 10MB in size. + + Sample rows: + + :: + + gs://folder/text1.txt + gs://folder/text2.pdf + gs://folder/text3.tif + + .. raw:: html + +
+ <h5>Sentiment Analysis</h5>
+ One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text + file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF + + Text files can be no larger than 128kB in size. + + Sample rows: + + :: + + gs://folder/text1.txt + gs://folder/text2.pdf + gs://folder/text3.tif + + .. raw:: html + +
+ <h5>Entity Extraction</h5>
+ + One or more JSONL (JSON Lines) files that either provide inline text + or documents. You can only use one format, either inline text or + documents, for a single call to [AutoMl.BatchPredict]. + + Each JSONL file contains a per line a proto that wraps a temporary + user-assigned TextSnippet ID (string up to 2000 characters long) + called "id", a TextSnippet proto (in JSON representation) and zero + or more TextFeature protos. Any given text snippet content must have + 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII + already is). The IDs provided should be unique. + + Each document JSONL file contains, per line, a proto that wraps a + Document proto with ``input_config`` set. Each document cannot + exceed 2MB in size. + + Supported document extensions: .PDF, .TIF, .TIFF + + Each JSONL file must not exceed 100MB in size, and no more than 20 + JSONL files may be passed. + + Sample inline JSONL file (Shown with artificial line breaks. Actual + line breaks are denoted by "\n".): + + :: + + { + "id": "my_first_id", + "text_snippet": { "content": "dog car cat"}, + "text_features": [ + { + "text_segment": {"start_offset": 4, "end_offset": 6}, + "structural_type": PARAGRAPH, + "bounding_poly": { + "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, + ] + }, + } + ], + }\n + { + "id": "2", + "text_snippet": { + "content": "Extended sample content", + "mime_type": "text/plain" + } + } + + Sample document JSONL file (Shown with artificial line breaks. + Actual line breaks are denoted by "\n".): + + :: + + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } + } + } + } + + .. raw:: html + +
+
+ + .. raw:: html + +

+ <h4>AutoML Tables</h4>

+
+ See `Preparing your training
+ data <https://cloud.google.com/automl-tables/docs/predict-batch>`__
+ for more information.
+
+ You can use either
+ [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source]
+ or [bigquery_source][BatchPredictInputConfig.bigquery_source].
+
+ **For gcs_source:**
+
+ CSV file(s), each by itself 10GB or smaller and total size must be
+ 100GB or smaller, where the first file must have a header containing
+ column names. If the first row of a subsequent file is the same as
+ the header, then it is also treated as a header. All other rows
+ contain values for the corresponding columns.
+
+ The column names must contain the model's
+
+ [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
+ [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
+ (order doesn't matter). The columns corresponding to the model's
+ input feature column specs must contain values compatible with the
+ column spec's data types. Prediction on all the rows, i.e. the CSV
+ lines, will be attempted.
+
+ Sample rows from a CSV file:
+
+ .. raw:: html
+
+        "First Name","Last Name","Dob","Addresses"
+
+        "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+
+        "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+        
+
+ **For bigquery_source:**
+
+ The URI of a BigQuery table. The user data size of the BigQuery
+ table must be 100GB or smaller.
+
+ The column names must contain the model's
+
+ [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
+ [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
+ (order doesn't matter). The columns corresponding to the model's
+ input feature column specs must contain values compatible with the
+ column spec's data types. Prediction on all the rows of the table
+ will be attempted.
+
+
+ **Input field definitions:**
+
+ ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For
+ example, "gs://folder/video.avi".
+
+ ``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning,
+ inclusive, of a time segment within an example that has a time
+ dimension (e.g. video).
+
+ ``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end,
+ exclusive, of a time segment within an example that has a time
+ dimension (e.g. video).
+
+ ``TIME_OFFSET`` : A number of seconds as measured from the start of
+ an example (e.g. video). Fractions are allowed, up to a microsecond
+ precision. "inf" is allowed, and it means the end of the example.
+
+ **Errors:**
+
+ If any of the provided CSV files can't be parsed or if more than a
+ certain percent of CSV rows cannot be processed then the operation
+ fails and prediction does not happen. Regardless of overall success
+ or failure, the per-row failures, up to a certain count cap, will be
+ listed in Operation.metadata.partial_failures.
+
+ Attributes:
+     gcs_source (~.io.GcsSource):
+         Required. The Google Cloud Storage location
+         for the input content.
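+
+ A minimal instance of this message, as a sketch (the bucket path is
+ a placeholder)::
+
+     from google.cloud import automl
+
+     input_config = automl.BatchPredictInputConfig(
+         gcs_source=automl.GcsSource(input_uris=["gs://folder/batch.csv"])
+     )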
+ """ + + gcs_destination = proto.Field( + proto.MESSAGE, number=1, oneof="destination", message="GcsDestination", + ) + + +class BatchPredictOutputConfig(proto.Message): + r"""Output configuration for BatchPredict Action. + + As destination the + + [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination] + must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory + is created. Its name will be "prediction--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends + on the ML problem the predictions are made for. + + - For Image Classification: In the created directory files + ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. A + single image will be listed only once with all its annotations, + and its annotations will never be split across files. Each .JSONL + file will contain, per line, a JSON representation of a proto + that wraps image's "ID" : "" followed by a list of zero + or more AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image + failed (partially or completely), then an additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``\ fields. + + - For Image Object Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. Each + .JSONL file will contain, per line, a JSON representation of a + proto that wraps image's "ID" : "" followed by a list + of zero or more AnnotationPayload protos (called annotations), + which have image_object_detection detail populated. A single + image will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``\ fields. + + - For Video Classification: In the created directory a + video_classification.csv file, and a .JSON file per each video + classification requested in the input (i.e. each line in given + CSV(s)), will be created. + + :: + + The format of video_classification.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 + to 1 the prediction input lines (i.e. video_classification.csv has + precisely the same number of lines as the prediction input had.) 
+ JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = + "OK" if prediction completed successfully, or an error code with + message otherwise. If STATUS is not "OK" then the .JSON file for + that line may not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for the video time segment the file is assigned to in the + video_classification.csv. All AnnotationPayload protos will have + video_classification field set, and will be sorted by + video_classification.type field (note that the returned types are + governed by `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). + + - For Video Object Tracking: In the created directory a + video_object_tracking.csv file will be created, and multiple + files video_object_trackinng_1.json, + video_object_trackinng_2.json,..., video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number + of lines in given CSV(s)). + + :: + + The format of video_object_tracking.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 + to 1 the prediction input lines (i.e. video_object_tracking.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = + "OK" if prediction completed successfully, or an error code with + message otherwise. If STATUS is not "OK" then the .JSON file for + that line may not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for each frame of the video time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. + + - For Text Classification: In the created directory files + ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in + the text snippet (or document) proto and a list of + zero or more AnnotationPayload protos (called annotations), which + have classification detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any input file (or document) failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Text Sentiment: In the created directory files + ``text_sentiment_1.jsonl``, + ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will + be created, where N may be 1, and depends on the total number of + inputs and annotations found. 
+ + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in + the text snippet (or document) proto and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_sentiment detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any input file (or document) failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Text Extraction: In the created directory files + ``text_extraction_1.jsonl``, + ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. The contents of these + .JSONL file(s) depend on whether the input used inline text, or + documents. If input was inline, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request text snippet's "id" (if specified), followed by + input text snippet, and a list of zero or more AnnotationPayload + protos (called annotations), which have text_extraction detail + populated. A single text snippet will be listed only once with + all its annotations, and its annotations will never be split + across files. If input used documents, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request document proto, followed by its OCR-ed + representation in the form of a text snippet, finally followed by + a list of zero or more AnnotationPayload protos (called + annotations), which have text_extraction detail populated and + refer, via their indices, to the OCR-ed text snippet. A single + document (and its text snippet) will be listed only once with all + its annotations, and its annotations will never be split across + files. If prediction for any text snippet failed (partially or + completely), then additional ``errors_1.jsonl``, + ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created + (N depends on total number of failed predictions). These files + will have a JSON representation of a proto that wraps either the + "id" : "" (in case of inline) or the document proto (in + case of document) but here followed by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Tables: Output depends on whether + + [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] + or + + [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] + is set (either is allowed). Google Cloud Storage case: In the + created directory files ``tables_1.csv``, ``tables_2.csv``,..., + ``tables_N.csv`` will be created, where N may be 1, and depends on + the total number of the successfully predicted rows. 
For all + CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + given on input followed by M target column names in the format of + + "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>\_\_score" + where M is the number of distinct target values, i.e. number of + distinct values in the target column of the table used to train the + model. Subsequent lines will contain the respective values of + successfully predicted rows, with the last, i.e. the target, columns + having the corresponding prediction + [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For + REGRESSION and FORECASTING + + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + [display_name-s][google.cloud.automl.v1p1beta.display_name] given on + input followed by the predicted target column with name in the + format of + + "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + Subsequent lines will contain the respective values of successfully + predicted rows, with the last, i.e. the target, column having the + predicted target value. If prediction for any rows failed, then an + additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv`` + will be created (N depends on total number of failed rows). These + files will have analogous format as ``tables_*.csv``, but always + with a single target column having + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + represented as a JSON string, and containing only ``code`` and + ``message``. BigQuery case: + + [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a + new dataset will be created with name + ``prediction__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be + in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and ``errors``. + The ``predictions`` table's column names will be the input columns' + + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + followed by the target column with name in the format of + + "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + The input feature columns will contain the respective values of + successfully predicted rows, with the target column having an ARRAY + of + + [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. 
+ The ``errors`` table contains rows for which the prediction has
+ failed; it has analogous input columns while the target column name
+ is in the format of
+
+ "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+
+ [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
+ and as a value has
+
+ [``google.rpc.Status``](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ represented as a STRUCT, and containing only ``code`` and
+ ``message``.
+
+ Attributes:
+     gcs_destination (~.io.GcsDestination):
+         Required. The Google Cloud Storage location
+         of the directory where the output is to be
+         written to.
+ """
+
+ gcs_destination = proto.Field(
+     proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
+ )
+
+
+ class ModelExportOutputConfig(proto.Message):
+ r"""Output configuration for ModelExport Action.
+
+ Attributes:
+     gcs_destination (~.io.GcsDestination):
+         Required. The Google Cloud Storage location where the model
+         is to be written to. This location may only be set for the
+         following model formats: "tflite", "edgetpu_tflite",
+         "tf_saved_model", "tf_js", "core_ml".
+
+         Under the directory given as the destination a new one with
+         name "model-export-<model-display-name>-<timestamp-of-export-call>",
+         where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+         format, will be created. Inside it, the model and any of its
+         supporting files will be written.
+     model_format (str):
+         The format in which the model must be exported. The
+         available, and default, formats depend on the problem and
+         model type (if a given problem and type combination doesn't
+         have a format listed, it means its models are not
+         exportable):
+
+         -  For Image Classification mobile-low-latency-1,
+            mobile-versatile-1, mobile-high-accuracy-1: "tflite"
+            (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
+            "docker".
+
+         -  For Image Classification mobile-core-ml-low-latency-1,
+            mobile-core-ml-versatile-1,
+            mobile-core-ml-high-accuracy-1: "core_ml" (default).
+
+         -  For Image Object Detection mobile-low-latency-1,
+            mobile-versatile-1, mobile-high-accuracy-1: "tflite",
+            "tf_saved_model", "tf_js". Formats description:
+
+         -  tflite - Used for Android mobile devices.
+
+         -  edgetpu_tflite - Used for `Edge
+            TPU <https://cloud.google.com/edge-tpu/>`__ devices.
+
+         -  tf_saved_model - A tensorflow model in SavedModel format.
+
+         -  tf_js - A
+            `TensorFlow.js <https://www.tensorflow.org/js>`__ model
+            that can be used in the browser and in Node.js using
+            JavaScript.
+
+         -  docker - Used for Docker containers. Use the params field
+            to customize the container. The container is verified to
+            work correctly on the Ubuntu 16.04 operating system. See
+            more at the [containers
+            quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
+
+         -  core_ml - Used for iOS mobile devices.
+     params (Sequence[~.io.ModelExportOutputConfig.ParamsEntry]):
+         Additional model-type and format specific parameters
+         describing the requirements for the to-be-exported model
+         files; any string must be up to 25,000 characters long.
+
+         -  For ``docker`` format: ``cpu_architecture`` - (string)
+            "x86_64" (default). ``gpu_architecture`` - (string)
+            "none" (default), "nvidia".
+ """
+
+ gcs_destination = proto.Field(
+     proto.MESSAGE, number=1, oneof="destination", message="GcsDestination",
+ )
+
+ model_format = proto.Field(proto.STRING, number=4)
+
+ params = proto.MapField(proto.STRING, proto.STRING, number=2)
+
+
+ class GcsSource(proto.Message):
+ r"""The Google Cloud Storage location for the input content.
+ + Attributes: + input_uris (Sequence[str]): + Required. Google Cloud Storage URIs to input files, up to + 2000 characters long. Accepted forms: + + - Full object path, e.g. gs://bucket/directory/object.csv + """ + + input_uris = proto.RepeatedField(proto.STRING, number=1) + + +class GcsDestination(proto.Message): + r"""The Google Cloud Storage location where the output is to be + written to. + + Attributes: + output_uri_prefix (str): + Required. Google Cloud Storage URI to output directory, up + to 2000 characters long. Accepted forms: + + - Prefix path: gs://bucket/directory The requesting user + must have write permission to the bucket. The directory + is created if it doesn't exist. + """ + + output_uri_prefix = proto.Field(proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/model.py b/google/cloud/automl_v1/types/model.py new file mode 100644 index 00000000..14664ae2 --- /dev/null +++ b/google/cloud/automl_v1/types/model.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import translation +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module(package="google.cloud.automl.v1", manifest={"Model",},) + + +class Model(proto.Message): + r"""API proto representing a trained machine learning model. + + Attributes: + translation_model_metadata (~.translation.TranslationModelMetadata): + Metadata for translation models. + image_classification_model_metadata (~.image.ImageClassificationModelMetadata): + Metadata for image classification models. + text_classification_model_metadata (~.text.TextClassificationModelMetadata): + Metadata for text classification models. + image_object_detection_model_metadata (~.image.ImageObjectDetectionModelMetadata): + Metadata for image object detection models. + text_extraction_model_metadata (~.text.TextExtractionModelMetadata): + Metadata for text extraction models. + text_sentiment_model_metadata (~.text.TextSentimentModelMetadata): + Metadata for text sentiment models. + name (str): + Output only. Resource name of the model. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + display_name (str): + Required. The name of the model to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. It must start with a letter. + dataset_id (str): + Required. The resource ID of the dataset used + to create the model. The dataset must come from + the same ancestor project and location. + create_time (~.timestamp.Timestamp): + Output only. Timestamp when the model + training finished and can be used for + prediction. + update_time (~.timestamp.Timestamp): + Output only. 
Timestamp when this model was
+ last updated.
+     deployment_state (~.model.Model.DeploymentState):
+         Output only. Deployment state of the model. A
+         model can only serve prediction requests after
+         it gets deployed.
+     etag (str):
+         Used to perform consistent read-modify-write
+         updates. If not set, a blind "overwrite" update
+         happens.
+     labels (Sequence[~.model.Model.LabelsEntry]):
+         Optional. The labels with user-defined
+         metadata to organize your model.
+         Label keys and values can be no longer than 64
+         characters (Unicode codepoints), can only
+         contain lowercase letters, numeric characters,
+         underscores and dashes. International characters
+         are allowed. Label values are optional. Label
+         keys must start with a letter.
+         See https://goo.gl/xmQnxf for more information
+         on and examples of labels.
+ """
+
+ class DeploymentState(proto.Enum):
+     r"""Deployment state of the model."""
+     DEPLOYMENT_STATE_UNSPECIFIED = 0
+     DEPLOYED = 1
+     UNDEPLOYED = 2
+
+ translation_model_metadata = proto.Field(
+     proto.MESSAGE,
+     number=15,
+     oneof="model_metadata",
+     message=translation.TranslationModelMetadata,
+ )
+
+ image_classification_model_metadata = proto.Field(
+     proto.MESSAGE,
+     number=13,
+     oneof="model_metadata",
+     message=image.ImageClassificationModelMetadata,
+ )
+
+ text_classification_model_metadata = proto.Field(
+     proto.MESSAGE,
+     number=14,
+     oneof="model_metadata",
+     message=text.TextClassificationModelMetadata,
+ )
+
+ image_object_detection_model_metadata = proto.Field(
+     proto.MESSAGE,
+     number=20,
+     oneof="model_metadata",
+     message=image.ImageObjectDetectionModelMetadata,
+ )
+
+ text_extraction_model_metadata = proto.Field(
+     proto.MESSAGE,
+     number=19,
+     oneof="model_metadata",
+     message=text.TextExtractionModelMetadata,
+ )
+
+ text_sentiment_model_metadata = proto.Field(
+     proto.MESSAGE,
+     number=22,
+     oneof="model_metadata",
+     message=text.TextSentimentModelMetadata,
+ )
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ dataset_id = proto.Field(proto.STRING, number=3)
+
+ create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,)
+
+ deployment_state = proto.Field(proto.ENUM, number=8, enum=DeploymentState,)
+
+ etag = proto.Field(proto.STRING, number=10)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=34)
+
+
+ __all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/model_evaluation.py b/google/cloud/automl_v1/types/model_evaluation.py
new file mode 100644
index 00000000..39736edc
--- /dev/null
+++ b/google/cloud/automl_v1/types/model_evaluation.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1.types import classification
+from google.cloud.automl_v1.types import detection
+from google.cloud.automl_v1.types import text_extraction
+from google.cloud.automl_v1.types import text_sentiment
+from google.cloud.automl_v1.types import translation
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1", manifest={"ModelEvaluation",},
+)
+
+
+class ModelEvaluation(proto.Message):
+ r"""Evaluation results of a model.
+
+ Attributes:
+     classification_evaluation_metrics (~.classification.ClassificationEvaluationMetrics):
+         Model evaluation metrics for image, text,
+         video and tables classification.
+         A Tables problem is considered a classification
+         problem when the target column is of the
+         CATEGORY DataType.
+     translation_evaluation_metrics (~.translation.TranslationEvaluationMetrics):
+         Model evaluation metrics for translation.
+     image_object_detection_evaluation_metrics (~.detection.ImageObjectDetectionEvaluationMetrics):
+         Model evaluation metrics for image object
+         detection.
+     text_sentiment_evaluation_metrics (~.text_sentiment.TextSentimentEvaluationMetrics):
+         Evaluation metrics for text sentiment models.
+     text_extraction_evaluation_metrics (~.text_extraction.TextExtractionEvaluationMetrics):
+         Evaluation metrics for text extraction
+         models.
+     name (str):
+         Output only. Resource name of the model evaluation. Format:
+
+         ``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}``
+     annotation_spec_id (str):
+         Output only. The ID of the annotation spec that the model
+         evaluation applies to. The ID is empty for the overall
+         model evaluation. For Tables, annotation specs in the
+         dataset do not exist and this ID is never set, but for
+         CLASSIFICATION
+
+         [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
+         the
+         [display_name][google.cloud.automl.v1.ModelEvaluation.display_name]
+         field is used.
+     display_name (str):
+         Output only. The value of
+         [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
+         at the moment when the model was trained. Because this field
+         returns a value at model training time, for different models
+         trained from the same dataset, the values may differ, since
+         display names could have been changed between the two
+         models' trainings. For Tables CLASSIFICATION
+
+         [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
+         distinct values of the target column at the moment of the
+         model evaluation are populated here. The display_name is
+         empty for the overall model evaluation.
+     create_time (~.timestamp.Timestamp):
+         Output only. Timestamp when this model
+         evaluation was created.
+     evaluated_example_count (int):
+         Output only. The number of examples used for model
+         evaluation, i.e. for which ground truth from time of model
+         creation is compared against the predicted annotations
+         created by the model. For overall ModelEvaluation (i.e. with
+         annotation_spec_id not set) this is the total number of all
+         examples used for evaluation. Otherwise, this is the count
+         of examples that according to the ground truth were
+         annotated by the
+
+         [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id].
+ """ + + classification_evaluation_metrics = proto.Field( + proto.MESSAGE, + number=8, + oneof="metrics", + message=classification.ClassificationEvaluationMetrics, + ) + + translation_evaluation_metrics = proto.Field( + proto.MESSAGE, + number=9, + oneof="metrics", + message=translation.TranslationEvaluationMetrics, + ) + + image_object_detection_evaluation_metrics = proto.Field( + proto.MESSAGE, + number=12, + oneof="metrics", + message=detection.ImageObjectDetectionEvaluationMetrics, + ) + + text_sentiment_evaluation_metrics = proto.Field( + proto.MESSAGE, + number=11, + oneof="metrics", + message=text_sentiment.TextSentimentEvaluationMetrics, + ) + + text_extraction_evaluation_metrics = proto.Field( + proto.MESSAGE, + number=13, + oneof="metrics", + message=text_extraction.TextExtractionEvaluationMetrics, + ) + + name = proto.Field(proto.STRING, number=1) + + annotation_spec_id = proto.Field(proto.STRING, number=2) + + display_name = proto.Field(proto.STRING, number=15) + + create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + evaluated_example_count = proto.Field(proto.INT32, number=6) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/operations.py b/google/cloud/automl_v1/types/operations.py new file mode 100644 index 00000000..e3ba76bd --- /dev/null +++ b/google/cloud/automl_v1/types/operations.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import io +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", + manifest={ + "OperationMetadata", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", + "UndeployModelOperationMetadata", + "CreateDatasetOperationMetadata", + "CreateModelOperationMetadata", + "ImportDataOperationMetadata", + "ExportDataOperationMetadata", + "BatchPredictOperationMetadata", + "ExportModelOperationMetadata", + }, +) + + +class OperationMetadata(proto.Message): + r"""Metadata used across all long running operations returned by + AutoML API. + + Attributes: + delete_details (~.operations.DeleteOperationMetadata): + Details of a Delete operation. + deploy_model_details (~.operations.DeployModelOperationMetadata): + Details of a DeployModel operation. + undeploy_model_details (~.operations.UndeployModelOperationMetadata): + Details of an UndeployModel operation. + create_model_details (~.operations.CreateModelOperationMetadata): + Details of CreateModel operation. + create_dataset_details (~.operations.CreateDatasetOperationMetadata): + Details of CreateDataset operation. + import_data_details (~.operations.ImportDataOperationMetadata): + Details of ImportData operation. + batch_predict_details (~.operations.BatchPredictOperationMetadata): + Details of BatchPredict operation. 
+ export_data_details (~.operations.ExportDataOperationMetadata): + Details of ExportData operation. + export_model_details (~.operations.ExportModelOperationMetadata): + Details of ExportModel operation. + progress_percent (int): + Output only. Progress of operation. Range: [0, 100]. Not + used currently. + partial_failures (Sequence[~.status.Status]): + Output only. Partial failures encountered. + E.g. single files that couldn't be read. + This field should never exceed 20 entries. + Status details field will contain standard GCP + error details. + create_time (~.timestamp.Timestamp): + Output only. Time when the operation was + created. + update_time (~.timestamp.Timestamp): + Output only. Time when the operation was + updated for the last time. + """ + + delete_details = proto.Field( + proto.MESSAGE, number=8, oneof="details", message="DeleteOperationMetadata", + ) + + deploy_model_details = proto.Field( + proto.MESSAGE, + number=24, + oneof="details", + message="DeployModelOperationMetadata", + ) + + undeploy_model_details = proto.Field( + proto.MESSAGE, + number=25, + oneof="details", + message="UndeployModelOperationMetadata", + ) + + create_model_details = proto.Field( + proto.MESSAGE, + number=10, + oneof="details", + message="CreateModelOperationMetadata", + ) + + create_dataset_details = proto.Field( + proto.MESSAGE, + number=30, + oneof="details", + message="CreateDatasetOperationMetadata", + ) + + import_data_details = proto.Field( + proto.MESSAGE, + number=15, + oneof="details", + message="ImportDataOperationMetadata", + ) + + batch_predict_details = proto.Field( + proto.MESSAGE, + number=16, + oneof="details", + message="BatchPredictOperationMetadata", + ) + + export_data_details = proto.Field( + proto.MESSAGE, + number=21, + oneof="details", + message="ExportDataOperationMetadata", + ) + + export_model_details = proto.Field( + proto.MESSAGE, + number=22, + oneof="details", + message="ExportModelOperationMetadata", + ) + + progress_percent = proto.Field(proto.INT32, number=13) + + partial_failures = proto.RepeatedField( + proto.MESSAGE, number=2, message=status.Status, + ) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities.""" + + +class DeployModelOperationMetadata(proto.Message): + r"""Details of DeployModel operation.""" + + +class UndeployModelOperationMetadata(proto.Message): + r"""Details of UndeployModel operation.""" + + +class CreateDatasetOperationMetadata(proto.Message): + r"""Details of CreateDataset operation.""" + + +class CreateModelOperationMetadata(proto.Message): + r"""Details of CreateModel operation.""" + + +class ImportDataOperationMetadata(proto.Message): + r"""Details of ImportData operation.""" + + +class ExportDataOperationMetadata(proto.Message): + r"""Details of ExportData operation. + + Attributes: + output_info (~.operations.ExportDataOperationMetadata.ExportDataOutputInfo): + Output only. Information further describing + this export data's output. + """ + + class ExportDataOutputInfo(proto.Message): + r"""Further describes this export data's output. Supplements + [OutputConfig][google.cloud.automl.v1.OutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the exported data + is written. 
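+
+        Example (an editor's illustrative sketch; the dataset name and
+        bucket are hypothetical, and reading the export directory back
+        from the finished operation's metadata is shown as one possible
+        pattern):
+
+        .. code-block:: python
+
+            from google.cloud import automl_v1
+
+            client = automl_v1.AutoMlClient()
+            operation = client.export_data(
+                name="projects/my-project/locations/us-central1/datasets/TEN123",
+                output_config=automl_v1.OutputConfig(
+                    gcs_destination=automl_v1.GcsDestination(
+                        output_uri_prefix="gs://my-bucket/export/"
+                    )
+                ),
+            )
+            operation.result()  # wait for the export to finish
+            info = operation.metadata.export_data_details.output_info
+            print(info.gcs_output_directory)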
+ """ + + gcs_output_directory = proto.Field( + proto.STRING, number=1, oneof="output_location" + ) + + output_info = proto.Field(proto.MESSAGE, number=1, message=ExportDataOutputInfo,) + + +class BatchPredictOperationMetadata(proto.Message): + r"""Details of BatchPredict operation. + + Attributes: + input_config (~.io.BatchPredictInputConfig): + Output only. The input config that was given + upon starting this batch predict operation. + output_info (~.operations.BatchPredictOperationMetadata.BatchPredictOutputInfo): + Output only. Information further describing + this batch predict's output. + """ + + class BatchPredictOutputInfo(proto.Message): + r"""Further describes this batch predict's output. Supplements + + [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the prediction + output is written. + """ + + gcs_output_directory = proto.Field( + proto.STRING, number=1, oneof="output_location" + ) + + input_config = proto.Field( + proto.MESSAGE, number=1, message=io.BatchPredictInputConfig, + ) + + output_info = proto.Field(proto.MESSAGE, number=2, message=BatchPredictOutputInfo,) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of ExportModel operation. + + Attributes: + output_info (~.operations.ExportModelOperationMetadata.ExportModelOutputInfo): + Output only. Information further describing + the output of this model export. + """ + + class ExportModelOutputInfo(proto.Message): + r"""Further describes the output of model export. Supplements + [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the model will be + exported. + """ + + gcs_output_directory = proto.Field(proto.STRING, number=1) + + output_info = proto.Field(proto.MESSAGE, number=2, message=ExportModelOutputInfo,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/prediction_service.py b/google/cloud/automl_v1/types/prediction_service.py new file mode 100644 index 00000000..3b85f497 --- /dev/null +++ b/google/cloud/automl_v1/types/prediction_service.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import io + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", + manifest={ + "PredictRequest", + "PredictResponse", + "BatchPredictRequest", + "BatchPredictResult", + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + Attributes: + name (str): + Required. 
Name of the model requested to + serve the prediction. + payload (~.data_items.ExamplePayload): + Required. Payload to perform a prediction on. + The payload must match the problem type that the + model was trained to solve. + params (Sequence[~.prediction_service.PredictRequest.ParamsEntry]): + Additional domain-specific parameters, any string must be up + to 25000 characters long. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When + the model makes predictions for an image, it will only + produce results that have at least this confidence score. + The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects on + the image, it will only produce bounding boxes which have at + least this confidence score. Value in 0 to 1 range, default + is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned. The default is 100. The number of + returned bounding boxes might be limited by the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + + [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] + is populated in the returned list of + [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] + objects. The default is false. + """ + + name = proto.Field(proto.STRING, number=1) + + payload = proto.Field(proto.MESSAGE, number=2, message=data_items.ExamplePayload,) + + params = proto.MapField(proto.STRING, proto.STRING, number=3) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + Attributes: + payload (Sequence[~.annotation_payload.AnnotationPayload]): + Prediction result. + AutoML Translation and AutoML Natural Language + Sentiment Analysis return precisely one payload. + preprocessed_input (~.data_items.ExamplePayload): + The preprocessed example that AutoML actually makes + prediction on. Empty if AutoML does not preprocess the input + example. + + For AutoML Natural Language (Classification, Entity + Extraction, and Sentiment Analysis), if the input is a + document, the recognized text is returned in the + [document_text][google.cloud.automl.v1.Document.document_text] + property. + metadata (Sequence[~.prediction_service.PredictResponse.MetadataEntry]): + Additional domain-specific prediction response metadata. + + AutoML Vision Object Detection + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes to return per image. + + AutoML Natural Language Sentiment Analysis + + ``sentiment_score`` : (float, deprecated) A value between -1 + and 1, -1 maps to least positive sentiment, while 1 maps to + the most positive one and the higher the score, the more + positive the sentiment in the document is. Yet these values + are relative to the training data, so e.g. if all data was + positive then -1 is also positive (though the least). + ``sentiment_score`` is not the same as "score" and + "magnitude" from Sentiment Analysis in the Natural Language + API. 
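+
+    Example (an editor's illustrative sketch; the model name and text
+    payload are hypothetical):
+
+    .. code-block:: python
+
+        from google.cloud import automl_v1
+
+        client = automl_v1.PredictionServiceClient()
+        response = client.predict(
+            name="projects/my-project/locations/us-central1/models/TCN123",
+            payload=automl_v1.ExamplePayload(
+                text_snippet=automl_v1.TextSnippet(
+                    content="I love this product!", mime_type="text/plain"
+                )
+            ),
+            params={"score_threshold": "0.8"},
+        )
+        for annotation in response.payload:
+            print(annotation.display_name)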
+    """
+
+    payload = proto.RepeatedField(
+        proto.MESSAGE, number=1, message=annotation_payload.AnnotationPayload,
+    )
+
+    preprocessed_input = proto.Field(
+        proto.MESSAGE, number=3, message=data_items.ExamplePayload,
+    )
+
+    metadata = proto.MapField(proto.STRING, proto.STRING, number=2)
+
+
+class BatchPredictRequest(proto.Message):
+    r"""Request message for
+    [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
+
+    Attributes:
+        name (str):
+            Required. Name of the model requested to
+            serve the batch prediction.
+        input_config (~.io.BatchPredictInputConfig):
+            Required. The input configuration for batch
+            prediction.
+        output_config (~.io.BatchPredictOutputConfig):
+            Required. The configuration specifying where
+            output predictions should be written.
+        params (Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]):
+            Additional domain-specific parameters for the predictions,
+            any string must be up to 25000 characters long.
+
+            AutoML Natural Language Classification
+
+            ``score_threshold`` : (float) A value from 0.0 to 1.0. When
+            the model makes predictions for a text snippet, it will only
+            produce results that have at least this confidence score.
+            The default is 0.5.
+
+            AutoML Vision Classification
+
+            ``score_threshold`` : (float) A value from 0.0 to 1.0. When
+            the model makes predictions for an image, it will only
+            produce results that have at least this confidence score.
+            The default is 0.5.
+
+            AutoML Vision Object Detection
+
+            ``score_threshold`` : (float) When Model detects objects on
+            the image, it will only produce bounding boxes which have at
+            least this confidence score. Value in 0 to 1 range, default
+            is 0.5.
+
+            ``max_bounding_box_count`` : (int64) The maximum number of
+            bounding boxes returned per image. The default is 100, the
+            number of bounding boxes returned might be limited by the
+            server.
+
+            AutoML Video Intelligence Classification
+
+            ``score_threshold`` : (float) A value from 0.0 to 1.0. When
+            the model makes predictions for a video, it will only
+            produce results that have at least this confidence score.
+            The default is 0.5.
+
+            ``segment_classification`` : (boolean) Set to true to
+            request segment-level classification. AutoML Video
+            Intelligence returns labels and their confidence scores for
+            the entire segment of the video that user specified in the
+            request configuration. The default is true.
+
+            ``shot_classification`` : (boolean) Set to true to request
+            shot-level classification. AutoML Video Intelligence
+            determines the boundaries for each camera shot in the entire
+            segment of the video that user specified in the request
+            configuration. AutoML Video Intelligence then returns labels
+            and their confidence scores for each detected shot, along
+            with the start and end time of the shot. The default is
+            false.
+
+            WARNING: Model evaluation is not done for this
+            classification type, the quality of it depends on training
+            data, but there are no metrics provided to describe that
+            quality.
+
+            ``1s_interval_classification`` : (boolean) Set to true to
+            request classification for a video at one-second intervals.
+            AutoML Video Intelligence returns labels and their
+            confidence scores for each second of the entire segment of
+            the video that user specified in the request configuration.
+            The default is false.
+
+            WARNING: Model evaluation is not done for this
+            classification type, the quality of it depends on training
+            data, but there are no metrics provided to describe that
+            quality.
+ + AutoML Video Intelligence Object Tracking + + ``score_threshold`` : (float) When Model detects objects on + video frames, it will only produce bounding boxes which have + at least this confidence score. Value in 0 to 1 range, + default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per image. The default is 100, the + number of bounding boxes returned might be limited by the + server. + + ``min_bounding_box_size`` : (float) Only bounding boxes with + shortest edge at least that long as a relative value of + video frame size are returned. Value in 0 to 1 range. + Default is 0. + """ + + name = proto.Field(proto.STRING, number=1) + + input_config = proto.Field( + proto.MESSAGE, number=3, message=io.BatchPredictInputConfig, + ) + + output_config = proto.Field( + proto.MESSAGE, number=4, message=io.BatchPredictOutputConfig, + ) + + params = proto.MapField(proto.STRING, proto.STRING, number=5) + + +class BatchPredictResult(proto.Message): + r"""Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of the operation + returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + + Attributes: + metadata (Sequence[~.prediction_service.BatchPredictResult.MetadataEntry]): + Additional domain-specific prediction response metadata. + + AutoML Vision Object Detection + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per image. + + AutoML Video Intelligence Object Tracking + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per frame. + """ + + metadata = proto.MapField(proto.STRING, proto.STRING, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/service.py b/google/cloud/automl_v1/types/service.py new file mode 100644 index 00000000..57211809 --- /dev/null +++ b/google/cloud/automl_v1/types/service.py @@ -0,0 +1,504 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1.types import dataset as gca_dataset
+from google.cloud.automl_v1.types import image
+from google.cloud.automl_v1.types import io
+from google.cloud.automl_v1.types import model as gca_model
+from google.cloud.automl_v1.types import model_evaluation as gca_model_evaluation
+from google.protobuf import field_mask_pb2 as field_mask  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1",
+    manifest={
+        "CreateDatasetRequest",
+        "GetDatasetRequest",
+        "ListDatasetsRequest",
+        "ListDatasetsResponse",
+        "UpdateDatasetRequest",
+        "DeleteDatasetRequest",
+        "ImportDataRequest",
+        "ExportDataRequest",
+        "GetAnnotationSpecRequest",
+        "CreateModelRequest",
+        "GetModelRequest",
+        "ListModelsRequest",
+        "ListModelsResponse",
+        "DeleteModelRequest",
+        "UpdateModelRequest",
+        "DeployModelRequest",
+        "UndeployModelRequest",
+        "ExportModelRequest",
+        "GetModelEvaluationRequest",
+        "ListModelEvaluationsRequest",
+        "ListModelEvaluationsResponse",
+    },
+)
+
+
+class CreateDatasetRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the project to
+            create the dataset for.
+        dataset (~.gca_dataset.Dataset):
+            Required. The dataset to create.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    dataset = proto.Field(proto.MESSAGE, number=2, message=gca_dataset.Dataset,)
+
+
+class GetDatasetRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the dataset to
+            retrieve.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class ListDatasetsRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the project
+            from which to list datasets.
+        filter (str):
+            An expression for filtering the results of the request.
+
+            -  ``dataset_metadata`` - for existence of the case (e.g.
+               image_classification_dataset_metadata:*). Some examples
+               of using the filter are:
+
+            -  ``translation_dataset_metadata:*`` --> The dataset has
+               translation_dataset_metadata.
+        page_size (int):
+            Requested page size. Server may return fewer
+            results than requested. If unspecified, server
+            will pick a default size.
+        page_token (str):
+            A token identifying a page of results for the server to
+            return. Typically obtained via
+            [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token]
+            of the previous
+            [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]
+            call.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    filter = proto.Field(proto.STRING, number=3)
+
+    page_size = proto.Field(proto.INT32, number=4)
+
+    page_token = proto.Field(proto.STRING, number=6)
+
+
+class ListDatasetsResponse(proto.Message):
+    r"""Response message for
+    [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
+
+    Attributes:
+        datasets (Sequence[~.gca_dataset.Dataset]):
+            The datasets read.
+        next_page_token (str):
+            A token to retrieve next page of results. Pass to
+            [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token]
+            to obtain that page.
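+
+    Example (an editor's illustrative sketch; the pager returned by the
+    client fetches further pages transparently, so ``next_page_token``
+    rarely needs to be handled by hand; the project ID is hypothetical):
+
+    .. code-block:: python
+
+        from google.cloud import automl_v1
+
+        client = automl_v1.AutoMlClient()
+        for dataset in client.list_datasets(
+            parent="projects/my-project/locations/us-central1",
+            filter="translation_dataset_metadata:*",
+        ):
+            print(dataset.name)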
+ """ + + @property + def raw_page(self): + return self + + datasets = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_dataset.Dataset, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + + Attributes: + dataset (~.gca_dataset.Dataset): + Required. The dataset which replaces the + resource on the server. + update_mask (~.field_mask.FieldMask): + Required. The update mask applies to the + resource. + """ + + dataset = proto.Field(proto.MESSAGE, number=1, message=gca_dataset.Dataset,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + delete. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + + Attributes: + name (str): + Required. Dataset name. Dataset must already + exist. All imported annotations and examples + will be added. + input_config (~.io.InputConfig): + Required. The desired input location and its + domain specific semantics, if any. + """ + + name = proto.Field(proto.STRING, number=1) + + input_config = proto.Field(proto.MESSAGE, number=3, message=io.InputConfig,) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + + Attributes: + name (str): + Required. The resource name of the dataset. + output_config (~.io.OutputConfig): + Required. The desired output location. + """ + + name = proto.Field(proto.STRING, number=1) + + output_config = proto.Field(proto.MESSAGE, number=3, message=io.OutputConfig,) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The resource name of the annotation + spec to retrieve. + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateModelRequest(proto.Message): + r"""Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + + Attributes: + parent (str): + Required. Resource name of the parent project + where the model is being created. + model (~.gca_model.Model): + Required. The model to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + model = proto.Field(proto.MESSAGE, number=4, message=gca_model.Model,) + + +class GetModelRequest(proto.Message): + r"""Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + + Attributes: + name (str): + Required. Resource name of the model. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Attributes: + parent (str): + Required. Resource name of the project, from + which to list the models. + filter (str): + An expression for filtering the results of the request. + + - ``model_metadata`` - for existence of the case (e.g. + video_classification_model_metadata:*). + + - ``dataset_id`` - for = or !=. 
Some examples of using the
+               filter are:
+
+            -  ``image_classification_model_metadata:*`` --> The model
+               has image_classification_model_metadata.
+
+            -  ``dataset_id=5`` --> The model was created from a dataset
+               with ID 5.
+        page_size (int):
+            Requested page size.
+        page_token (str):
+            A token identifying a page of results for the server to
+            return. Typically obtained via
+            [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token]
+            of the previous
+            [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]
+            call.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    filter = proto.Field(proto.STRING, number=3)
+
+    page_size = proto.Field(proto.INT32, number=4)
+
+    page_token = proto.Field(proto.STRING, number=6)
+
+
+class ListModelsResponse(proto.Message):
+    r"""Response message for
+    [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
+
+    Attributes:
+        model (Sequence[~.gca_model.Model]):
+            List of models in the requested page.
+        next_page_token (str):
+            A token to retrieve next page of results. Pass to
+            [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token]
+            to obtain that page.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    model = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_model.Model,)
+
+    next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteModelRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
+
+    Attributes:
+        name (str):
+            Required. Resource name of the model being
+            deleted.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class UpdateModelRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel]
+
+    Attributes:
+        model (~.gca_model.Model):
+            Required. The model which replaces the
+            resource on the server.
+        update_mask (~.field_mask.FieldMask):
+            Required. The update mask applies to the
+            resource.
+    """
+
+    model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,)
+
+    update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
+
+class DeployModelRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
+
+    Attributes:
+        image_object_detection_model_deployment_metadata (~.image.ImageObjectDetectionModelDeploymentMetadata):
+            Model deployment metadata specific to Image
+            Object Detection.
+        image_classification_model_deployment_metadata (~.image.ImageClassificationModelDeploymentMetadata):
+            Model deployment metadata specific to Image
+            Classification.
+        name (str):
+            Required. Resource name of the model to
+            deploy.
+    """
+
+    image_object_detection_model_deployment_metadata = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        oneof="model_deployment_metadata",
+        message=image.ImageObjectDetectionModelDeploymentMetadata,
+    )
+
+    image_classification_model_deployment_metadata = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        oneof="model_deployment_metadata",
+        message=image.ImageClassificationModelDeploymentMetadata,
+    )
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class UndeployModelRequest(proto.Message):
+    r"""Request message for
+    [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
+
+    Attributes:
+        name (str):
+            Required. Resource name of the model to
+            undeploy.
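+
+    Example (an editor's illustrative sketch; the model name is
+    hypothetical):
+
+    .. code-block:: python
+
+        from google.cloud import automl_v1
+
+        client = automl_v1.AutoMlClient()
+        operation = client.undeploy_model(
+            name="projects/my-project/locations/us-central1/models/TCN123"
+        )
+        operation.result()  # undeployment is a long-running operation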
+ """ + + name = proto.Field(proto.STRING, number=1) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an error code + will be returned. + + Attributes: + name (str): + Required. The resource name of the model to + export. + output_config (~.io.ModelExportOutputConfig): + Required. The desired output location and + configuration. + """ + + name = proto.Field(proto.STRING, number=1) + + output_config = proto.Field( + proto.MESSAGE, number=3, message=io.ModelExportOutputConfig, + ) + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. + + Attributes: + name (str): + Required. Resource name for the model + evaluation. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Attributes: + parent (str): + Required. Resource name of the model to list + the model evaluations for. If modelId is set as + "-", this will list model evaluations from + across all models of the parent location. + filter (str): + Required. An expression for filtering the results of the + request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was + done for annotation spec with ID different than 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation was + done for aggregate of all annotation specs. + page_size (int): + Requested page size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] + of the previous + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Attributes: + model_evaluation (Sequence[~.gca_model_evaluation.ModelEvaluation]): + List of model evaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to the + [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] + field of a new + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] + request to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + model_evaluation = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_model_evaluation.ModelEvaluation, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/text.py b/google/cloud/automl_v1/types/text.py new file mode 100644 index 00000000..72039612 --- /dev/null +++ b/google/cloud/automl_v1/types/text.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import classification + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", + manifest={ + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionDatasetMetadata", + "TextExtractionModelMetadata", + "TextSentimentDatasetMetadata", + "TextSentimentModelMetadata", + }, +) + + +class TextClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata for classification. + + Attributes: + classification_type (~.classification.ClassificationType): + Required. Type of the classification problem. + """ + + classification_type = proto.Field( + proto.ENUM, number=1, enum=classification.ClassificationType, + ) + + +class TextClassificationModelMetadata(proto.Message): + r"""Model metadata that is specific to text classification. + + Attributes: + classification_type (~.classification.ClassificationType): + Output only. Classification type of the + dataset used to train this model. + """ + + classification_type = proto.Field( + proto.ENUM, number=3, enum=classification.ClassificationType, + ) + + +class TextExtractionDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to text extraction""" + + +class TextExtractionModelMetadata(proto.Message): + r"""Model metadata that is specific to text extraction.""" + + +class TextSentimentDatasetMetadata(proto.Message): + r"""Dataset metadata for text sentiment. + + Attributes: + sentiment_max (int): + Required. A sentiment is expressed as an integer ordinal, + where higher value means a more positive sentiment. The + range of sentiments that will be used is between 0 and + sentiment_max (inclusive on both ends), and all the values + in the range must be represented in the dataset before a + model can be created. sentiment_max value must be between 1 + and 10 (inclusive). 
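+
+    Example (an editor's illustrative sketch; the project, display name
+    and sentiment scale are hypothetical):
+
+    .. code-block:: python
+
+        from google.cloud import automl_v1
+
+        client = automl_v1.AutoMlClient()
+        dataset = automl_v1.Dataset(
+            display_name="product_reviews",
+            text_sentiment_dataset_metadata=automl_v1.TextSentimentDatasetMetadata(
+                sentiment_max=4  # every value in 0..4 must appear in the data
+            ),
+        )
+        operation = client.create_dataset(
+            parent="projects/my-project/locations/us-central1",
+            dataset=dataset,
+        )
+        created = operation.result()  # CreateDataset is long-running in v1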
+ """ + + sentiment_max = proto.Field(proto.INT32, number=1) + + +class TextSentimentModelMetadata(proto.Message): + r"""Model metadata that is specific to text sentiment.""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1/types/text_extraction.py b/google/cloud/automl_v1/types/text_extraction.py new file mode 100644 index 00000000..19f1eb30 --- /dev/null +++ b/google/cloud/automl_v1/types/text_extraction.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1.types import text_segment as gca_text_segment + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1", + manifest={"TextExtractionAnnotation", "TextExtractionEvaluationMetrics",}, +) + + +class TextExtractionAnnotation(proto.Message): + r"""Annotation for identifying spans of text. + + Attributes: + text_segment (~.gca_text_segment.TextSegment): + An entity annotation will set this, which is + the part of the original text to which the + annotation pertains. + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence in correctness of the annotation. + """ + + text_segment = proto.Field( + proto.MESSAGE, + number=3, + oneof="annotation", + message=gca_text_segment.TextSegment, + ) + + score = proto.Field(proto.FLOAT, number=1) + + +class TextExtractionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for text extraction problems. + + Attributes: + au_prc (float): + Output only. The Area under precision recall + curve metric. + confidence_metrics_entries (Sequence[~.text_extraction.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): + Output only. Metrics that have confidence + thresholds. Precision-recall curve can be + derived from it. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. Only annotations + with score of at least this threshold are + considered to be ones the model would return. + recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. 
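+
+        Example (an editor's illustrative sketch; assumes ``metrics`` is
+        a ``TextExtractionEvaluationMetrics`` message taken from a model
+        evaluation):
+
+        .. code-block:: python
+
+            # Choose the operating point with the best F1 score.
+            best = max(
+                metrics.confidence_metrics_entries,
+                key=lambda entry: entry.f1_score,
+            )
+            print(best.confidence_threshold, best.precision, best.recall)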
+        """
+
+        confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+        recall = proto.Field(proto.FLOAT, number=3)
+
+        precision = proto.Field(proto.FLOAT, number=4)
+
+        f1_score = proto.Field(proto.FLOAT, number=5)
+
+    au_prc = proto.Field(proto.FLOAT, number=1)
+
+    confidence_metrics_entries = proto.RepeatedField(
+        proto.MESSAGE, number=2, message=ConfidenceMetricsEntry,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/text_segment.py b/google/cloud/automl_v1/types/text_segment.py
new file mode 100644
index 00000000..5267a52a
--- /dev/null
+++ b/google/cloud/automl_v1/types/text_segment.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1", manifest={"TextSegment",},
+)
+
+
+class TextSegment(proto.Message):
+    r"""A contiguous part of a text (string), assuming it has a
+    UTF-8 NFC encoding.
+
+    Attributes:
+        content (str):
+            Output only. The content of the TextSegment.
+        start_offset (int):
+            Required. Zero-based character index of the
+            first character of the text segment (counting
+            characters from the beginning of the text).
+        end_offset (int):
+            Required. Zero-based character index of the first character
+            past the end of the text segment (counting characters from
+            the beginning of the text). The character at the end_offset
+            is NOT included in the text segment.
+    """
+
+    content = proto.Field(proto.STRING, number=3)
+
+    start_offset = proto.Field(proto.INT64, number=1)
+
+    end_offset = proto.Field(proto.INT64, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/text_sentiment.py b/google/cloud/automl_v1/types/text_sentiment.py
new file mode 100644
index 00000000..576416e2
--- /dev/null
+++ b/google/cloud/automl_v1/types/text_sentiment.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1.types import classification
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1",
+    manifest={"TextSentimentAnnotation", "TextSentimentEvaluationMetrics",},
+)
+
+
+class TextSentimentAnnotation(proto.Message):
+    r"""Contains annotation details specific to text sentiment.
+
+    Attributes:
+        sentiment (int):
+            Output only.
The sentiment with the semantic, as given to
+            the
+            [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]
+            when populating the dataset from which the model used for
+            the prediction had been trained. The sentiment values are
+            between 0 and
+            Dataset.text_sentiment_dataset_metadata.sentiment_max
+            (inclusive), with higher value meaning more positive
+            sentiment. They are completely relative, i.e. 0 means least
+            positive sentiment and sentiment_max means the most positive
+            from the sentiments present in the train data. Therefore
+            e.g. if train data had only negative sentiment, then
+            sentiment_max would still be negative (although least
+            negative). The sentiment shouldn't be confused with "score"
+            or "magnitude" from the previous Natural Language Sentiment
+            Analysis API.
+    """
+
+    sentiment = proto.Field(proto.INT32, number=1)
+
+
+class TextSentimentEvaluationMetrics(proto.Message):
+    r"""Model evaluation metrics for text sentiment problems.
+
+    Attributes:
+        precision (float):
+            Output only. Precision.
+        recall (float):
+            Output only. Recall.
+        f1_score (float):
+            Output only. The harmonic mean of recall and
+            precision.
+        mean_absolute_error (float):
+            Output only. Mean absolute error. Only set
+            for the overall model evaluation, not for
+            evaluation of a single annotation spec.
+        mean_squared_error (float):
+            Output only. Mean squared error. Only set for
+            the overall model evaluation, not for evaluation
+            of a single annotation spec.
+        linear_kappa (float):
+            Output only. Linear weighted kappa. Only set
+            for the overall model evaluation, not for
+            evaluation of a single annotation spec.
+        quadratic_kappa (float):
+            Output only. Quadratic weighted kappa. Only
+            set for the overall model evaluation, not for
+            evaluation of a single annotation spec.
+        confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix):
+            Output only. Confusion matrix of the
+            evaluation. Only set for the overall model
+            evaluation, not for evaluation of a single
+            annotation spec.
+    """
+
+    precision = proto.Field(proto.FLOAT, number=1)
+
+    recall = proto.Field(proto.FLOAT, number=2)
+
+    f1_score = proto.Field(proto.FLOAT, number=3)
+
+    mean_absolute_error = proto.Field(proto.FLOAT, number=4)
+
+    mean_squared_error = proto.Field(proto.FLOAT, number=5)
+
+    linear_kappa = proto.Field(proto.FLOAT, number=6)
+
+    quadratic_kappa = proto.Field(proto.FLOAT, number=7)
+
+    confusion_matrix = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=classification.ClassificationEvaluationMetrics.ConfusionMatrix,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/translation.py b/google/cloud/automl_v1/types/translation.py
new file mode 100644
index 00000000..acea0220
--- /dev/null
+++ b/google/cloud/automl_v1/types/translation.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1.types import data_items
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1",
+    manifest={
+        "TranslationDatasetMetadata",
+        "TranslationEvaluationMetrics",
+        "TranslationModelMetadata",
+        "TranslationAnnotation",
+    },
+)
+
+
+class TranslationDatasetMetadata(proto.Message):
+    r"""Dataset metadata that is specific to translation.
+
+    Attributes:
+        source_language_code (str):
+            Required. The BCP-47 language code of the
+            source language.
+        target_language_code (str):
+            Required. The BCP-47 language code of the
+            target language.
+    """
+
+    source_language_code = proto.Field(proto.STRING, number=1)
+
+    target_language_code = proto.Field(proto.STRING, number=2)
+
+
+class TranslationEvaluationMetrics(proto.Message):
+    r"""Evaluation metrics for the dataset.
+
+    Attributes:
+        bleu_score (float):
+            Output only. BLEU score.
+        base_bleu_score (float):
+            Output only. BLEU score for base model.
+    """
+
+    bleu_score = proto.Field(proto.DOUBLE, number=1)
+
+    base_bleu_score = proto.Field(proto.DOUBLE, number=2)
+
+
+class TranslationModelMetadata(proto.Message):
+    r"""Model metadata that is specific to translation.
+
+    Attributes:
+        base_model (str):
+            The resource name of the model to use as a baseline to train
+            the custom model. If unset, we use the default base model
+            provided by Google Translate. Format:
+            ``projects/{project_id}/locations/{location_id}/models/{model_id}``
+        source_language_code (str):
+            Output only. Inferred from the dataset.
+            The source language (The BCP-47 language code)
+            that is used for training.
+        target_language_code (str):
+            Output only. The target language (The BCP-47
+            language code) that is used for training.
+    """
+
+    base_model = proto.Field(proto.STRING, number=1)
+
+    source_language_code = proto.Field(proto.STRING, number=2)
+
+    target_language_code = proto.Field(proto.STRING, number=3)
+
+
+class TranslationAnnotation(proto.Message):
+    r"""Annotation details specific to translation.
+
+    Attributes:
+        translated_content (~.data_items.TextSnippet):
+            Output only. The translated content.
+    """
+
+    translated_content = proto.Field(
+        proto.MESSAGE, number=1, message=data_items.TextSnippet,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/__init__.py b/google/cloud/automl_v1beta1/__init__.py
index 8fcf0590..904a45aa 100644
--- a/google/cloud/automl_v1beta1/__init__.py
+++ b/google/cloud/automl_v1beta1/__init__.py
@@ -1,62 +1,273 @@
 # -*- coding: utf-8 -*-
-#
 # Copyright 2020 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     https://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.automl_v1beta1 import types -from google.cloud.automl_v1beta1.gapic import auto_ml_client -from google.cloud.automl_v1beta1.gapic import enums -from google.cloud.automl_v1beta1.gapic import prediction_service_client -from google.cloud.automl_v1beta1.tables import tables_client -from google.cloud.automl_v1beta1.tables import gcs_client - - -class TablesClient(tables_client.TablesClient): - __doc__ = tables_client.TablesClient.__doc__ - - -class GcsClient(gcs_client.GcsClient): - __doc__ = gcs_client.GcsClient.__doc__ - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class PredictionServiceClient(prediction_service_client.PredictionServiceClient): - __doc__ = prediction_service_client.PredictionServiceClient.__doc__ - enums = enums - - -class AutoMlClient(auto_ml_client.AutoMlClient): - __doc__ = auto_ml_client.AutoMlClient.__doc__ - enums = enums +from .services.auto_ml import AutoMlClient +from .services.prediction_service import PredictionServiceClient +from .services.tables.gcs_client import GcsClient +from .services.tables.tables_client import TablesClient +from .types.annotation_payload import AnnotationPayload +from .types.annotation_spec import AnnotationSpec +from .types.classification import ClassificationAnnotation +from .types.classification import ClassificationEvaluationMetrics +from .types.classification import ClassificationType +from .types.classification import VideoClassificationAnnotation +from .types.column_spec import ColumnSpec +from .types.data_items import Document +from .types.data_items import DocumentDimensions +from .types.data_items import ExamplePayload +from .types.data_items import Image +from .types.data_items import Row +from .types.data_items import TextSnippet +from .types.data_stats import ArrayStats +from .types.data_stats import CategoryStats +from .types.data_stats import CorrelationStats +from .types.data_stats import DataStats +from .types.data_stats import Float64Stats +from .types.data_stats import StringStats +from .types.data_stats import StructStats +from .types.data_stats import TimestampStats +from .types.data_types import DataType +from .types.data_types import StructType +from .types.data_types import TypeCode +from .types.dataset import Dataset +from .types.detection import BoundingBoxMetricsEntry +from .types.detection import ImageObjectDetectionAnnotation +from .types.detection import ImageObjectDetectionEvaluationMetrics +from .types.detection import VideoObjectTrackingAnnotation +from .types.detection import VideoObjectTrackingEvaluationMetrics +from .types.geometry import BoundingPoly +from .types.geometry import NormalizedVertex +from .types.image import ImageClassificationDatasetMetadata +from .types.image import ImageClassificationModelDeploymentMetadata +from .types.image import ImageClassificationModelMetadata +from .types.image import ImageObjectDetectionDatasetMetadata +from .types.image import ImageObjectDetectionModelDeploymentMetadata +from .types.image import ImageObjectDetectionModelMetadata +from .types.io import BatchPredictInputConfig +from .types.io import BatchPredictOutputConfig +from .types.io import BigQueryDestination +from .types.io import 
BigQuerySource +from .types.io import DocumentInputConfig +from .types.io import ExportEvaluatedExamplesOutputConfig +from .types.io import GcrDestination +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import InputConfig +from .types.io import ModelExportOutputConfig +from .types.io import OutputConfig +from .types.model import Model +from .types.model_evaluation import ModelEvaluation +from .types.operations import BatchPredictOperationMetadata +from .types.operations import CreateModelOperationMetadata +from .types.operations import DeleteOperationMetadata +from .types.operations import DeployModelOperationMetadata +from .types.operations import ExportDataOperationMetadata +from .types.operations import ExportEvaluatedExamplesOperationMetadata +from .types.operations import ExportModelOperationMetadata +from .types.operations import ImportDataOperationMetadata +from .types.operations import OperationMetadata +from .types.operations import UndeployModelOperationMetadata +from .types.prediction_service import BatchPredictRequest +from .types.prediction_service import BatchPredictResult +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.ranges import DoubleRange +from .types.regression import RegressionEvaluationMetrics +from .types.service import CreateDatasetRequest +from .types.service import CreateModelRequest +from .types.service import DeleteDatasetRequest +from .types.service import DeleteModelRequest +from .types.service import DeployModelRequest +from .types.service import ExportDataRequest +from .types.service import ExportEvaluatedExamplesRequest +from .types.service import ExportModelRequest +from .types.service import GetAnnotationSpecRequest +from .types.service import GetColumnSpecRequest +from .types.service import GetDatasetRequest +from .types.service import GetModelEvaluationRequest +from .types.service import GetModelRequest +from .types.service import GetTableSpecRequest +from .types.service import ImportDataRequest +from .types.service import ListColumnSpecsRequest +from .types.service import ListColumnSpecsResponse +from .types.service import ListDatasetsRequest +from .types.service import ListDatasetsResponse +from .types.service import ListModelEvaluationsRequest +from .types.service import ListModelEvaluationsResponse +from .types.service import ListModelsRequest +from .types.service import ListModelsResponse +from .types.service import ListTableSpecsRequest +from .types.service import ListTableSpecsResponse +from .types.service import UndeployModelRequest +from .types.service import UpdateColumnSpecRequest +from .types.service import UpdateDatasetRequest +from .types.service import UpdateTableSpecRequest +from .types.table_spec import TableSpec +from .types.tables import TablesAnnotation +from .types.tables import TablesDatasetMetadata +from .types.tables import TablesModelColumnInfo +from .types.tables import TablesModelMetadata +from .types.temporal import TimeSegment +from .types.text import TextClassificationDatasetMetadata +from .types.text import TextClassificationModelMetadata +from .types.text import TextExtractionDatasetMetadata +from .types.text import TextExtractionModelMetadata +from .types.text import TextSentimentDatasetMetadata +from .types.text import TextSentimentModelMetadata +from .types.text_extraction import TextExtractionAnnotation +from .types.text_extraction import TextExtractionEvaluationMetrics +from .types.text_segment import 
TextSegment +from .types.text_sentiment import TextSentimentAnnotation +from .types.text_sentiment import TextSentimentEvaluationMetrics +from .types.translation import TranslationAnnotation +from .types.translation import TranslationDatasetMetadata +from .types.translation import TranslationEvaluationMetrics +from .types.translation import TranslationModelMetadata +from .types.video import VideoClassificationDatasetMetadata +from .types.video import VideoClassificationModelMetadata +from .types.video import VideoObjectTrackingDatasetMetadata +from .types.video import VideoObjectTrackingModelMetadata __all__ = ( - "enums", - "types", + "GcsClient", + "TablesClient", + "AnnotationPayload", + "AnnotationSpec", + "ArrayStats", + "BatchPredictInputConfig", + "BatchPredictOperationMetadata", + "BatchPredictOutputConfig", + "BatchPredictRequest", + "BatchPredictResult", + "BigQueryDestination", + "BigQuerySource", + "BoundingBoxMetricsEntry", + "BoundingPoly", + "CategoryStats", + "ClassificationAnnotation", + "ClassificationEvaluationMetrics", + "ClassificationType", + "ColumnSpec", + "CorrelationStats", + "CreateDatasetRequest", + "CreateModelOperationMetadata", + "CreateModelRequest", + "DataStats", + "DataType", + "Dataset", + "DeleteDatasetRequest", + "DeleteModelRequest", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", + "DeployModelRequest", + "Document", + "DocumentDimensions", + "DocumentInputConfig", + "DoubleRange", + "ExamplePayload", + "ExportDataOperationMetadata", + "ExportDataRequest", + "ExportEvaluatedExamplesOperationMetadata", + "ExportEvaluatedExamplesOutputConfig", + "ExportEvaluatedExamplesRequest", + "ExportModelOperationMetadata", + "ExportModelRequest", + "Float64Stats", + "GcrDestination", + "GcsDestination", + "GcsSource", + "GetAnnotationSpecRequest", + "GetColumnSpecRequest", + "GetDatasetRequest", + "GetModelEvaluationRequest", + "GetModelRequest", + "GetTableSpecRequest", + "Image", + "ImageClassificationDatasetMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionAnnotation", + "ImageObjectDetectionDatasetMetadata", + "ImageObjectDetectionEvaluationMetrics", + "ImageObjectDetectionModelDeploymentMetadata", + "ImageObjectDetectionModelMetadata", + "ImportDataOperationMetadata", + "ImportDataRequest", + "InputConfig", + "ListColumnSpecsRequest", + "ListColumnSpecsResponse", + "ListDatasetsRequest", + "ListDatasetsResponse", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", + "ListModelsRequest", + "ListModelsResponse", + "ListTableSpecsRequest", + "ListTableSpecsResponse", + "Model", + "ModelEvaluation", + "ModelExportOutputConfig", + "NormalizedVertex", + "OperationMetadata", + "OutputConfig", + "PredictRequest", + "PredictResponse", "PredictionServiceClient", + "RegressionEvaluationMetrics", + "Row", + "StringStats", + "StructStats", + "StructType", + "TableSpec", + "TablesAnnotation", + "TablesDatasetMetadata", + "TablesModelColumnInfo", + "TablesModelMetadata", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionAnnotation", + "TextExtractionDatasetMetadata", + "TextExtractionEvaluationMetrics", + "TextExtractionModelMetadata", + "TextSegment", + "TextSentimentAnnotation", + "TextSentimentDatasetMetadata", + "TextSentimentEvaluationMetrics", + "TextSentimentModelMetadata", + "TextSnippet", + "TimeSegment", + "TimestampStats", + "TranslationAnnotation", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + 
"TranslationModelMetadata", + "TypeCode", + "UndeployModelOperationMetadata", + "UndeployModelRequest", + "UpdateColumnSpecRequest", + "UpdateDatasetRequest", + "UpdateTableSpecRequest", + "VideoClassificationAnnotation", + "VideoClassificationDatasetMetadata", + "VideoClassificationModelMetadata", + "VideoObjectTrackingAnnotation", + "VideoObjectTrackingDatasetMetadata", + "VideoObjectTrackingEvaluationMetrics", + "VideoObjectTrackingModelMetadata", "AutoMlClient", ) diff --git a/google/cloud/automl_v1beta1/gapic/__init__.py b/google/cloud/automl_v1beta1/gapic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client.py deleted file mode 100644 index 3ce313fa..00000000 --- a/google/cloud/automl_v1beta1/gapic/auto_ml_client.py +++ /dev/null @@ -1,2470 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.cloud.automl.v1beta1 AutoMl API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.automl_v1beta1.gapic import auto_ml_client_config -from google.cloud.automl_v1beta1.gapic import enums -from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport -from google.cloud.automl_v1beta1.proto import annotation_spec_pb2 -from google.cloud.automl_v1beta1.proto import column_spec_pb2 -from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.proto import dataset_pb2 -from google.cloud.automl_v1beta1.proto import image_pb2 -from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 -from google.cloud.automl_v1beta1.proto import model_pb2 -from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.automl_v1beta1.proto import prediction_service_pb2 -from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc -from google.cloud.automl_v1beta1.proto import service_pb2 -from google.cloud.automl_v1beta1.proto import service_pb2_grpc -from google.cloud.automl_v1beta1.proto import table_spec_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version - - -class AutoMlClient(object): - """ - AutoML Server API. 
-
-    The resource names are assigned by the server. The server never reuses
-    names that it has created after the resources with those names are
-    deleted.
-
-    An ID of a resource is the last element of the item's resource name. For
-    ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
-    then the id for the item is ``{dataset_id}``.
-
-    Currently the only supported ``location_id`` is "us-central1".
-
-    On any input that is documented to expect a string parameter in
-    snake_case or kebab-case, either of those cases is accepted.
-    """
-
-    SERVICE_ADDRESS = "automl.googleapis.com:443"
-    """The default address of the service."""
-
-    # The name of the interface for this client. This is the key used to
-    # find the method configuration in the client_config dictionary.
-    _INTERFACE_NAME = "google.cloud.automl.v1beta1.AutoMl"
-
-    @classmethod
-    def from_service_account_file(cls, filename, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-        file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            AutoMlClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @classmethod
-    def annotation_spec_path(cls, project, location, dataset, annotation_spec):
-        """Return a fully-qualified annotation_spec string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
-            project=project,
-            location=location,
-            dataset=dataset,
-            annotation_spec=annotation_spec,
-        )
-
-    @classmethod
-    def column_spec_path(cls, project, location, dataset, table_spec, column_spec):
-        """Return a fully-qualified column_spec string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}",
-            project=project,
-            location=location,
-            dataset=dataset,
-            table_spec=table_spec,
-            column_spec=column_spec,
-        )
-
-    @classmethod
-    def dataset_path(cls, project, location, dataset):
-        """Return a fully-qualified dataset string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}/datasets/{dataset}",
-            project=project,
-            location=location,
-            dataset=dataset,
-        )
-
-    @classmethod
-    def location_path(cls, project, location):
-        """Return a fully-qualified location string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}",
-            project=project,
-            location=location,
-        )
-
-    @classmethod
-    def model_path(cls, project, location, model):
-        """Return a fully-qualified model string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}/models/{model}",
-            project=project,
-            location=location,
-            model=model,
-        )
-
-    @classmethod
-    def model_evaluation_path(cls, project, location, model, model_evaluation):
-        """Return a fully-qualified model_evaluation string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}",
-            project=project,
-            location=location,
-            model=model,
-            model_evaluation=model_evaluation,
-        )
-
-    @classmethod
-    def table_spec_path(cls, project, location, dataset, table_spec):
-        """Return a fully-qualified table_spec string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}",
-            project=project,
-            location=location,
-            dataset=dataset,
-            table_spec=table_spec,
-        )
-
-    def __init__(
-        self,
-        transport=None,
-        channel=None,
-        credentials=None,
-        client_config=None,
-        client_info=None,
-        client_options=None,
-    ):
-        """Constructor.
-
-        Args:
-            transport (Union[~.AutoMlGrpcTransport,
-                    Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport
-                instance, responsible for actually making the API calls.
-                The default transport uses the gRPC protocol.
-                This argument may also be a callable which returns a
-                transport instance. Callables will be sent the credentials
-                as the first argument and the default transport class as
-                the second argument.
-            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
-                through which to make calls. This argument is mutually exclusive
-                with ``credentials``; providing both will raise an exception.
-            credentials (google.auth.credentials.Credentials): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is mutually exclusive with providing a
-                transport instance to ``transport``; doing so will raise
-                an exception.
-            client_config (dict): DEPRECATED. A dictionary of call options for
-                each method. If not specified, the default configuration is used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            client_options (Union[dict, google.api_core.client_options.ClientOptions]):
-                Client options used to set user options on the client. API Endpoint
-                should be set through client_options.
-        """
-        # Raise deprecation warnings for things we want to go away.
-        if client_config is not None:
-            warnings.warn(
-                "The `client_config` argument is deprecated.",
-                PendingDeprecationWarning,
-                stacklevel=2,
-            )
-        else:
-            client_config = auto_ml_client_config.config
-
-        if channel:
-            warnings.warn(
-                "The `channel` argument is deprecated; use " "`transport` instead.",
-                PendingDeprecationWarning,
-                stacklevel=2,
-            )
-
-        api_endpoint = self.SERVICE_ADDRESS
-        if client_options:
-            if type(client_options) == dict:
-                client_options = google.api_core.client_options.from_dict(
-                    client_options
-                )
-            if client_options.api_endpoint:
-                api_endpoint = client_options.api_endpoint
-
-        # Instantiate the transport.
-        # The transport is responsible for handling serialization and
-        # deserialization and actually sending data to the service.
-        if transport:
-            if callable(transport):
-                self.transport = transport(
-                    credentials=credentials,
-                    default_class=auto_ml_grpc_transport.AutoMlGrpcTransport,
-                    address=api_endpoint,
-                )
-            else:
-                if credentials:
-                    raise ValueError(
-                        "Received both a transport instance and "
-                        "credentials; these are mutually exclusive."
- ) - self.transport = transport - else: - self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_dataset( - self, - parent, - dataset, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a dataset. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} - >>> - >>> response = client.create_dataset(parent, dataset) - - Args: - parent (str): Required. The resource name of the project to create the dataset for. - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "create_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_dataset, - default_retry=self._method_configs["CreateDataset"].retry, - default_timeout=self._method_configs["CreateDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_dataset( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a dataset. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.get_dataset(name) - - Args: - name (str): Required. The resource name of the dataset to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "get_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_dataset, - default_retry=self._method_configs["GetDataset"].retry, - default_timeout=self._method_configs["GetDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetDatasetRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_datasets( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists datasets in a project. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_datasets(parent): - ... # process element - ... 
pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_datasets(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The resource name of the project from which to list datasets. - filter_ (str): An expression for filtering the results of the request. - - - ``dataset_metadata`` - for existence of the case (e.g. - ``image_classification_dataset_metadata``). Some examples of using the - filter are: - - - ``translation_dataset_metadata:*`` --> The dataset has - translation_dataset_metadata. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_datasets" not in self._inner_api_calls: - self._inner_api_calls[ - "list_datasets" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_datasets, - default_retry=self._method_configs["ListDatasets"].retry, - default_timeout=self._method_configs["ListDatasets"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_datasets"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="datasets", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_dataset( - self, - dataset, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a dataset. 
- - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} - >>> - >>> response = client.update_dataset(dataset) - - Args: - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "update_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("dataset.name", dataset.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_dataset( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.delete_dataset(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the dataset to delete. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_dataset, - default_retry=self._method_configs["DeleteDataset"].retry, - default_timeout=self._method_configs["DeleteDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.DeleteDatasetRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["delete_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def import_data( - self, - name, - input_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # TODO: Initialize `input_config`: - >>> input_config = {} - >>> - >>> response = client.import_data(name, input_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Dataset name. Dataset must already exist. All imported - annotations and examples will be added. - input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location and its domain specific semantics, - if any. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.InputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "import_data" not in self._inner_api_calls: - self._inner_api_calls[ - "import_data" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.import_data, - default_retry=self._method_configs["ImportData"].retry, - default_timeout=self._method_configs["ImportData"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["import_data"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def export_data( - self, - name, - output_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Exports dataset's data to the provided output location. Returns an - empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_data(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the dataset. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - # Wrap the transport method to add retry and timeout logic. - if "export_data" not in self._inner_api_calls: - self._inner_api_calls[ - "export_data" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_data, - default_retry=self._method_configs["ExportData"].retry, - default_timeout=self._method_configs["ExportData"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["export_data"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_annotation_spec( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets an annotation spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') - >>> - >>> response = client.get_annotation_spec(name) - - Args: - name (str): Required. The resource name of the annotation spec to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_annotation_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "get_annotation_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_annotation_spec, - default_retry=self._method_configs["GetAnnotationSpec"].retry, - default_timeout=self._method_configs["GetAnnotationSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetAnnotationSpecRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_annotation_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_table_spec( - self, - name, - field_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a table spec. 
- - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') - >>> - >>> response = client.get_table_spec(name) - - Args: - name (str): Required. The resource name of the table spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_table_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "get_table_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table_spec, - default_retry=self._method_configs["GetTableSpec"].retry, - default_timeout=self._method_configs["GetTableSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_table_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_table_specs( - self, - parent, - field_mask=None, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists table specs in a dataset. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # Iterate over all results - >>> for element in client.list_table_specs(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_table_specs(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The resource name of the dataset to list table specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. 
- page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_table_specs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_table_specs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_table_specs, - default_retry=self._method_configs["ListTableSpecs"].retry, - default_timeout=self._method_configs["ListTableSpecs"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListTableSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_table_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="table_specs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_table_spec( - self, - table_spec, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a table spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> # TODO: Initialize `table_spec`: - >>> table_spec = {} - >>> - >>> response = client.update_table_spec(table_spec) - - Args: - table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): Required. The table spec which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.TableSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_table_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "update_table_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_table_spec, - default_retry=self._method_configs["UpdateTableSpec"].retry, - default_timeout=self._method_configs["UpdateTableSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateTableSpecRequest( - table_spec=table_spec, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_spec.name", table_spec.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_table_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_column_spec( - self, - name, - field_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a column spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') - >>> - >>> response = client.get_column_spec(name) - - Args: - name (str): Required. The resource name of the column spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_column_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "get_column_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_column_spec, - default_retry=self._method_configs["GetColumnSpec"].retry, - default_timeout=self._method_configs["GetColumnSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_column_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_column_specs( - self, - parent, - field_mask=None, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists column specs in a table spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') - >>> - >>> # Iterate over all results - >>> for element in client.list_column_specs(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_column_specs(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The resource name of the table spec to list column specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_column_specs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_column_specs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_column_specs, - default_retry=self._method_configs["ListColumnSpecs"].retry, - default_timeout=self._method_configs["ListColumnSpecs"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListColumnSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_column_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="column_specs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_column_spec( - self, - column_spec, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a column spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> # TODO: Initialize `column_spec`: - >>> column_spec = {} - >>> - >>> response = client.update_column_spec(column_spec) - - Args: - column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): Required. The column spec which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_column_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "update_column_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_column_spec, - default_retry=self._method_configs["UpdateColumnSpec"].retry, - default_timeout=self._method_configs["UpdateColumnSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateColumnSpecRequest( - column_spec=column_spec, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("column_spec.name", column_spec.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_column_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_model( - self, - parent, - model, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a model. Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> response = client.create_model(parent, model) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): Required. The model to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Model` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_model" not in self._inner_api_calls: - self._inner_api_calls[ - "create_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_model, - default_retry=self._method_configs["CreateModel"].retry, - default_timeout=self._method_configs["CreateModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.CreateModelRequest(parent=parent, model=model,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - model_pb2.Model, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a model. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.get_model(name) - - Args: - name (str): Required. Resource name of the model. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.Model` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_model" not in self._inner_api_calls: - self._inner_api_calls[ - "get_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model, - default_retry=self._method_configs["GetModel"].retry, - default_timeout=self._method_configs["GetModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetModelRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_models( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists models. 
- - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_models(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_models(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. Resource name of the project, from which to list the models. - filter_ (str): An expression for filtering the results of the request. - - - ``model_metadata`` - for existence of the case (e.g. - ``video_classification_model_metadata:*``). - - - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - - ``image_classification_model_metadata:*`` --> The model has - image_classification_model_metadata. - - - ``dataset_id=5`` --> The model was created from a dataset with ID 5. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
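The `filter_` parameter above is another instance of the trailing-underscore rename: in 2.0 it is plain `filter`, passed by keyword. A minimal sketch using the filter syntax documented above; the project and location are placeholders:

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
parent = "projects/my-project/locations/us-central1"  # placeholder

# `filter` (no underscore) in 2.0; the pager can be iterated directly,
# or one page at a time via its `pages` property.
for model in client.list_models(parent=parent, filter="dataset_id=5"):
    print(model.name)
```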
- if "list_models" not in self._inner_api_calls: - self._inner_api_calls[ - "list_models" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_models, - default_retry=self._method_configs["ListModels"].retry, - default_timeout=self._method_configs["ListModels"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_models"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="model", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model being deleted. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_model" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.DeleteModelRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["delete_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def deploy_model( - self, - name, - image_object_detection_model_deployment_metadata=None, - image_classification_model_deployment_metadata=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deploys a model. If a model is already deployed, deploying it with - the same parameters has no effect. Deploying with different parametrs - (as e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. - - Only applicable for Text Classification, Image Object Detection , - Tables, and Image Segmentation; all other domains manage deployment - automatically. - - Returns an empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.deploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model to deploy. - image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` - image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "deploy_model" not in self._inner_api_calls: - self._inner_api_calls[ - "deploy_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.deploy_model, - default_retry=self._method_configs["DeployModel"].retry, - default_timeout=self._method_configs["DeployModel"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - - request = service_pb2.DeployModelRequest( - name=name, - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["deploy_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def undeploy_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. - - Returns an empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.undeploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model to undeploy. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
-        if "undeploy_model" not in self._inner_api_calls:
-            self._inner_api_calls[
-                "undeploy_model"
-            ] = google.api_core.gapic_v1.method.wrap_method(
-                self.transport.undeploy_model,
-                default_retry=self._method_configs["UndeployModel"].retry,
-                default_timeout=self._method_configs["UndeployModel"].timeout,
-                client_info=self._client_info,
-            )
-
-        request = service_pb2.UndeployModelRequest(name=name,)
-        if metadata is None:
-            metadata = []
-        metadata = list(metadata)
-        try:
-            routing_header = [("name", name)]
-        except AttributeError:
-            pass
-        else:
-            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
-                routing_header
-            )
-            metadata.append(routing_metadata)
-
-        operation = self._inner_api_calls["undeploy_model"](
-            request, retry=retry, timeout=timeout, metadata=metadata
-        )
-        return google.api_core.operation.from_gapic(
-            operation,
-            self.transport._operations_client,
-            empty_pb2.Empty,
-            metadata_type=proto_operations_pb2.OperationMetadata,
-        )
-
-    def export_model(
-        self,
-        name,
-        output_config,
-        retry=google.api_core.gapic_v1.method.DEFAULT,
-        timeout=google.api_core.gapic_v1.method.DEFAULT,
-        metadata=None,
-    ):
-        """
-        Exports a trained, exportable model to a user-specified Google
-        Cloud Storage location. A model is considered exportable if and only if
-        it has an export format defined for it in ``ModelExportOutputConfig``.
-
-        Returns an empty response in the ``response`` field when it completes.
-
-        Example:
-            >>> from google.cloud import automl_v1beta1
-            >>>
-            >>> client = automl_v1beta1.AutoMlClient()
-            >>>
-            >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
-            >>>
-            >>> # TODO: Initialize `output_config`:
-            >>> output_config = {}
-            >>>
-            >>> response = client.export_model(name, output_config)
-            >>>
-            >>> def callback(operation_future):
-            ...     # Handle result.
-            ...     result = operation_future.result()
-            >>>
-            >>> response.add_done_callback(callback)
-            >>>
-            >>> # Handle metadata.
-            >>> metadata = response.metadata()
-
-        Args:
-            name (str): Required. The resource name of the model to export.
-            output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig`
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
- """ - # Wrap the transport method to add retry and timeout logic. - if "export_model" not in self._inner_api_calls: - self._inner_api_calls[ - "export_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_model, - default_retry=self._method_configs["ExportModel"].retry, - default_timeout=self._method_configs["ExportModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ExportModelRequest( - name=name, output_config=output_config, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["export_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def export_evaluated_examples( - self, - name, - output_config, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Exports examples on which the model was evaluated (i.e. which were - in the TEST set of the dataset the model was created from), together - with their ground truth annotations and the annotations created - (predicted) by the model. The examples, ground truth and predictions are - exported in the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. - - Currently only available for Tables. - - Returns an empty response in the ``response`` field when it completes. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_evaluated_examples(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the model whose evaluated examples are to - be exported. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "export_evaluated_examples" not in self._inner_api_calls: - self._inner_api_calls[ - "export_evaluated_examples" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_evaluated_examples, - default_retry=self._method_configs["ExportEvaluatedExamples"].retry, - default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["export_evaluated_examples"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def get_model_evaluation( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a model evaluation. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') - >>> - >>> response = client.get_model_evaluation(name) - - Args: - name (str): Required. Resource name for the model evaluation. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
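The 2.0 equivalent of the call above takes the evaluation's full resource name as a keyword argument. A sketch with placeholder IDs; the printed fields exist on the v1beta1 `ModelEvaluation` message:

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Placeholder resource name; the evaluation ID is nested under the model.
name = (
    "projects/my-project/locations/us-central1/"
    "models/MODEL_ID/modelEvaluations/EVAL_ID"
)

evaluation = client.get_model_evaluation(name=name)
print(evaluation.annotation_spec_id, evaluation.evaluated_example_count)
```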
- if "get_model_evaluation" not in self._inner_api_calls: - self._inner_api_calls[ - "get_model_evaluation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model_evaluation, - default_retry=self._method_configs["GetModelEvaluation"].retry, - default_timeout=self._method_configs["GetModelEvaluation"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetModelEvaluationRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_model_evaluation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_model_evaluations( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists model evaluations. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # Iterate over all results - >>> for element in client.list_model_evaluations(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_model_evaluations(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations from across all - models of the parent location. - filter_ (str): An expression for filtering the results of the request. - - - ``annotation_spec_id`` - for =, != or existence. See example below - for the last. - - Some examples of using the filter are: - - - ``annotation_spec_id!=4`` --> The model evaluation was done for - annotation spec with ID different than 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - # Wrap the transport method to add retry and timeout logic. - if "list_model_evaluations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_model_evaluations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_model_evaluations, - default_retry=self._method_configs["ListModelEvaluations"].retry, - default_timeout=self._method_configs["ListModelEvaluations"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_model_evaluations"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="model_evaluation", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py deleted file mode 100644 index 7319dbad..00000000 --- a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py +++ /dev/null @@ -1,162 +0,0 @@ -config = { - "interfaces": { - "google.cloud.automl.v1beta1.AutoMl": { - "retry_codes": { - "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], - "no_retry_2_codes": [], - "no_retry_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 5000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000, - "total_timeout_millis": 5000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_2_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 5000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000, - "total_timeout_millis": 5000, - }, - }, - "methods": { - "CreateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetDataset": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListDatasets": { - "timeout_millis": 50000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "UpdateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "DeleteDataset": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ImportData": { - "timeout_millis": 20000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "ExportData": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetAnnotationSpec": { - 
"timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetTableSpec": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListTableSpecs": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "UpdateTableSpec": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetColumnSpec": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListColumnSpecs": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "UpdateColumnSpec": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "CreateModel": { - "timeout_millis": 20000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetModel": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListModels": { - "timeout_millis": 50000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteModel": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeployModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "UndeployModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "ExportModel": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "ExportEvaluatedExamples": { - "timeout_millis": 5000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetModelEvaluation": { - "timeout_millis": 5000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListModelEvaluations": { - "timeout_millis": 50000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - }, - } - } -} diff --git a/google/cloud/automl_v1beta1/gapic/enums.py b/google/cloud/automl_v1beta1/gapic/enums.py deleted file mode 100644 index 2560c4f9..00000000 --- a/google/cloud/automl_v1beta1/gapic/enums.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class ClassificationType(enum.IntEnum): - """ - Type of the classification problem. - - Attributes: - CLASSIFICATION_TYPE_UNSPECIFIED (int): An un-set value of this enum. 
- MULTICLASS (int): At most one label is allowed per example. - MULTILABEL (int): Multiple labels are allowed for one example. - """ - - CLASSIFICATION_TYPE_UNSPECIFIED = 0 - MULTICLASS = 1 - MULTILABEL = 2 - - -class NullValue(enum.IntEnum): - """ - ``NullValue`` is a singleton enumeration to represent the null value - for the ``Value`` type union. - - The JSON representation for ``NullValue`` is JSON ``null``. - - Attributes: - NULL_VALUE (int): Null value. - """ - - NULL_VALUE = 0 - - -class TypeCode(enum.IntEnum): - """ - ``TypeCode`` is used as a part of ``DataType``. - - Attributes: - TYPE_CODE_UNSPECIFIED (int): Not specified. Should not be used. - FLOAT64 (int): Encoded as ``number``, or the strings ``"NaN"``, ``"Infinity"``, or - ``"-Infinity"``. - TIMESTAMP (int): Must be between 0AD and 9999AD. Encoded as ``string`` according to - ``time_format``, or, if that format is not set, then in RFC 3339 - ``date-time`` format, where ``time-offset`` = ``"Z"`` (e.g. - 1985-04-12T23:20:50.52Z). - STRING (int): Encoded as ``string``. - ARRAY (int): Encoded as ``list``, where the list elements are represented - according to - - ``list_element_type``. - STRUCT (int): Encoded as ``struct``, where field values are represented according - to ``struct_type``. - CATEGORY (int): Values of this type are not further understood by AutoML, e.g. - AutoML is unable to tell the order of values (as it could with FLOAT64), - or is unable to say if one value contains another (as it could with - STRING). Encoded as ``string`` (bytes should be base64-encoded, as - described in RFC 4648, section 4). - """ - - TYPE_CODE_UNSPECIFIED = 0 - FLOAT64 = 3 - TIMESTAMP = 4 - STRING = 6 - ARRAY = 8 - STRUCT = 9 - CATEGORY = 10 - - -class Document(object): - class Layout(object): - class TextSegmentType(enum.IntEnum): - """ - The type of TextSegment in the context of the original document. - - Attributes: - TEXT_SEGMENT_TYPE_UNSPECIFIED (int): Should not be used. - TOKEN (int): The text segment is a token. e.g. word. - PARAGRAPH (int): The text segment is a paragraph. - FORM_FIELD (int): The text segment is a form field. - FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be - treated as child of another FORM_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM_FIELD. - FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will - be treated as child of another FORM_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM_FIELD. - TABLE (int): The text segment is a whole table, including headers, and all rows. - TABLE_HEADER (int): The text segment is a table's headers. It will be treated as child of - another TABLE TextSegment if its span is subspan of another TextSegment - with type TABLE. - TABLE_ROW (int): The text segment is a row in table. It will be treated as child of - another TABLE TextSegment if its span is subspan of another TextSegment - with type TABLE. - TABLE_CELL (int): The text segment is a cell in table. It will be treated as child of - another TABLE_ROW TextSegment if its span is subspan of another - TextSegment with type TABLE_ROW. - """ - - TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 - TOKEN = 1 - PARAGRAPH = 2 - FORM_FIELD = 3 - FORM_FIELD_NAME = 4 - FORM_FIELD_CONTENTS = 5 - TABLE = 6 - TABLE_HEADER = 7 - TABLE_ROW = 8 - TABLE_CELL = 9 - - -class DocumentDimensions(object): - class DocumentDimensionUnit(enum.IntEnum): - """ - Unit of the document dimension. 
- - Attributes: - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED (int): Should not be used. - INCH (int): Document dimension is measured in inches. - CENTIMETER (int): Document dimension is measured in centimeters. - POINT (int): Document dimension is measured in points. 72 points = 1 inch. - """ - - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 - INCH = 1 - CENTIMETER = 2 - POINT = 3 - - -class Model(object): - class DeploymentState(enum.IntEnum): - """ - Deployment state of the model. - - Attributes: - DEPLOYMENT_STATE_UNSPECIFIED (int): Should not be used, an un-set enum has this value by default. - DEPLOYED (int): Model is deployed. - UNDEPLOYED (int): Model is not deployed. - """ - - DEPLOYMENT_STATE_UNSPECIFIED = 0 - DEPLOYED = 1 - UNDEPLOYED = 2 diff --git a/google/cloud/automl_v1beta1/gapic/prediction_service_client.py b/google/cloud/automl_v1beta1/gapic/prediction_service_client.py deleted file mode 100644 index 2bcb31ca..00000000 --- a/google/cloud/automl_v1beta1/gapic/prediction_service_client.py +++ /dev/null @@ -1,514 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.cloud.automl.v1beta1 PredictionService API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.path_template -import grpc - -from google.cloud.automl_v1beta1.gapic import enums -from google.cloud.automl_v1beta1.gapic import prediction_service_client_config -from google.cloud.automl_v1beta1.gapic.transports import ( - prediction_service_grpc_transport, -) -from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.automl_v1beta1.proto import prediction_service_pb2 -from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc -from google.longrunning import operations_pb2 as longrunning_operations_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version - - -class PredictionServiceClient(object): - """ - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - SERVICE_ADDRESS = "automl.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. 
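With `gapic/enums.py` gone (deleted above), enum values no longer live in a separate module. A sketch of the before/after access pattern, assuming the 2.0 package re-exports the enum types at the top level:

```py
from google.cloud import automl_v1beta1

# Pre-2.0, via the module removed in this diff:
#     from google.cloud.automl_v1beta1.gapic import enums
#     enums.ClassificationType.MULTICLASS
#
# 2.0 equivalent (top-level re-export assumed):
classification_type = automl_v1beta1.ClassificationType.MULTICLASS
```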
- _INTERFACE_NAME = "google.cloud.automl.v1beta1.PredictionService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def model_path(cls, project, location, model): - """Return a fully-qualified model string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/models/{model}", - project=project, - location=location, - model=model, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.PredictionServiceGrpcTransport, - Callable[[~.Credentials, type], ~.PredictionServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = prediction_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. 
-        # The transport is responsible for handling serialization and
-        # deserialization and actually sending data to the service.
-        if transport:
-            if callable(transport):
-                self.transport = transport(
-                    credentials=credentials,
-                    default_class=prediction_service_grpc_transport.PredictionServiceGrpcTransport,
-                    address=api_endpoint,
-                )
-            else:
-                if credentials:
-                    raise ValueError(
-                        "Received both a transport instance and "
-                        "credentials; these are mutually exclusive."
-                    )
-                self.transport = transport
-        else:
-            self.transport = prediction_service_grpc_transport.PredictionServiceGrpcTransport(
-                address=api_endpoint, channel=channel, credentials=credentials,
-            )
-
-        if client_info is None:
-            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
-                gapic_version=_GAPIC_LIBRARY_VERSION,
-            )
-        else:
-            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
-        self._client_info = client_info
-
-        # Parse out the default settings for retry and timeout for each RPC
-        # from the client configuration.
-        # (Ordinarily, these are the defaults specified in the `*_config.py`
-        # file next to this one.)
-        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
-            client_config["interfaces"][self._INTERFACE_NAME],
-        )
-
-        # Save a dictionary of cached API call functions.
-        # These are the actual callables which invoke the proper
-        # transport methods, wrapped with `wrap_method` to add retry,
-        # timeout, and the like.
-        self._inner_api_calls = {}
-
-    # Service calls
-    def predict(
-        self,
-        name,
-        payload,
-        params=None,
-        retry=google.api_core.gapic_v1.method.DEFAULT,
-        timeout=google.api_core.gapic_v1.method.DEFAULT,
-        metadata=None,
-    ):
-        """
-        Perform an online prediction. The prediction result will be directly
-        returned in the response. Available for the following ML problems and
-        their expected request payloads:
-
-        -  Image Classification - Image in .JPEG, .GIF or .PNG format,
-           image_bytes up to 30MB.
-        -  Image Object Detection - Image in .JPEG, .GIF or .PNG format,
-           image_bytes up to 30MB.
-        -  Text Classification - TextSnippet, content up to 60,000 characters,
-           UTF-8 encoded.
-        -  Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8
-           NFC encoded.
-        -  Translation - TextSnippet, content up to 25,000 characters, UTF-8
-           encoded.
-        -  Tables - Row, with column values matching the columns of the model,
-           up to 5MB. Not available for FORECASTING ``prediction_type``.
-        -  Text Sentiment - TextSnippet, content up to 500 characters, UTF-8
-           encoded.
-
-        Example:
-            >>> from google.cloud import automl_v1beta1
-            >>>
-            >>> client = automl_v1beta1.PredictionServiceClient()
-            >>>
-            >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
-            >>>
-            >>> # TODO: Initialize `payload`:
-            >>> payload = {}
-            >>>
-            >>> response = client.predict(name, payload)
-
-        Args:
-            name (str): Required. Name of the model requested to serve the prediction.
-            payload (Union[dict, ~google.cloud.automl_v1beta1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the
-                problem type that the model was trained to solve.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.automl_v1beta1.types.ExamplePayload`
-            params (dict[str -> str]): Additional domain-specific parameters, any string must be up to
-                25000 characters long.
-
-                -  For Image Classification:
-
-                   ``score_threshold`` - (float) A value from 0.0 to 1.0.
When the model - makes predictions for an image, it will only produce results that - have at least this confidence score. The default is 0.5. - - - For Image Object Detection: ``score_threshold`` - (float) When Model - detects objects on the image, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to 1 range, - default is 0.5. ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned in the response. - Default is 100, the requested value may be limited by server. - - - For Tables: feature_importance - (boolean) Whether feature importance - should be populated in the returned TablesAnnotation. The default is - false. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.PredictResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "predict" not in self._inner_api_calls: - self._inner_api_calls[ - "predict" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.predict, - default_retry=self._method_configs["Predict"].retry, - default_timeout=self._method_configs["Predict"].timeout, - client_info=self._client_info, - ) - - request = prediction_service_pb2.PredictRequest( - name=name, payload=payload, params=params, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["predict"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def batch_predict( - self, - name, - input_config, - output_config, - params, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Perform a batch prediction. Unlike the online ``Predict``, batch - prediction result won't be immediately available in the response. - Instead, a long running operation object is returned. User can poll the - operation result via ``GetOperation`` method. Once the operation is - done, ``BatchPredictResult`` is returned in the ``response`` field. 
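For the online `predict` call shown above, the 2.0 keyword form with a domain parameter looks roughly like the sketch below; the model name, file path, and payload shape for an image model are placeholders, and all `params` values are strings as the docstring requires:

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()

name = "projects/my-project/locations/us-central1/models/MODEL_ID"  # placeholder
with open("image.jpg", "rb") as f:  # placeholder file
    payload = {"image": {"image_bytes": f.read()}}

# Only return results scoring at least 0.8 (see the params notes above).
response = client.predict(
    name=name,
    payload=payload,
    params={"score_threshold": "0.8"},
)
for annotation in response.payload:
    print(annotation.display_name)
```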
-        Available for the following ML problems:
-
-        -  Image Classification
-        -  Image Object Detection
-        -  Video Classification
-        -  Video Object Tracking
-        -  Text Extraction
-        -  Tables
-
-        Example:
-            >>> from google.cloud import automl_v1beta1
-            >>>
-            >>> client = automl_v1beta1.PredictionServiceClient()
-            >>>
-            >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
-            >>>
-            >>> # TODO: Initialize `input_config`:
-            >>> input_config = {}
-            >>>
-            >>> # TODO: Initialize `output_config`:
-            >>> output_config = {}
-            >>>
-            >>> # TODO: Initialize `params`:
-            >>> params = {}
-            >>>
-            >>> response = client.batch_predict(name, input_config, output_config, params)
-            >>>
-            >>> def callback(operation_future):
-            ...     # Handle result.
-            ...     result = operation_future.result()
-            >>>
-            >>> response.add_done_callback(callback)
-            >>>
-            >>> # Handle metadata.
-            >>> metadata = response.metadata()
-
-        Args:
-            name (str): Required. Name of the model requested to serve the batch prediction.
-            input_config (Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.automl_v1beta1.types.BatchPredictInputConfig`
-            output_config (Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig]): Required. The configuration specifying where output predictions should
-                be written.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`
-            params (dict[str -> str]): Required. Additional domain-specific parameters for the predictions,
-                any string must be up to 25000 characters long.
-
-                -  For Text Classification:
-
-                   ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model
-                   makes predictions for a text snippet, it will only produce results
-                   that have at least this confidence score. The default is 0.5.
-
-                -  For Image Classification:
-
-                   ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model
-                   makes predictions for an image, it will only produce results that
-                   have at least this confidence score. The default is 0.5.
-
-                -  For Image Object Detection:
-
-                   ``score_threshold`` - (float) When Model detects objects on the
-                   image, it will only produce bounding boxes which have at least this
-                   confidence score. Value in 0 to 1 range, default is 0.5.
-                   ``max_bounding_box_count`` - (int64) No more than this number of
-                   bounding boxes will be produced per image. Default is 100, the
-                   requested value may be limited by server.
-
-                -  For Video Classification:
-
-                   ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model
-                   makes predictions for a video, it will only produce results that have
-                   at least this confidence score. The default is 0.5.
-                   ``segment_classification`` - (boolean) Set to true to request
-                   segment-level classification. AutoML Video Intelligence returns
-                   labels and their confidence scores for the entire segment of the
-                   video that user specified in the request configuration. The default
-                   is "true". ``shot_classification`` - (boolean) Set to true to request
-                   shot-level classification. AutoML Video Intelligence determines the
-                   boundaries for each camera shot in the entire segment of the video
-                   that user specified in the request configuration. AutoML Video
-                   Intelligence then returns labels and their confidence scores for each
-                   detected shot, along with the start and end time of the shot.
- WARNING: Model evaluation is not done for this classification type, - the quality of it depends on training data, but there are no metrics - provided to describe that quality. The default is "false". - ``1s_interval_classification`` - (boolean) Set to true to request - classification for a video at one-second intervals. AutoML Video - Intelligence returns labels and their confidence scores for each - second of the entire segment of the video that user specified in the - request configuration. WARNING: Model evaluation is not done for this - classification type, the quality of it depends on training data, but - there are no metrics provided to describe that quality. The default - is "false". - - - For Tables: - - feature_importance - (boolean) Whether feature importance should be - populated in the returned TablesAnnotations. The default is false. - - - For Video Object Tracking: - - ``score_threshold`` - (float) When Model detects objects on video - frames, it will only produce bounding boxes which have at least this - confidence score. Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than this number of - bounding boxes will be returned per frame. Default is 100, the - requested value may be limited by server. ``min_bounding_box_size`` - - (float) Only bounding boxes with shortest edge at least that long as - a relative value of video frame size will be returned. Value in 0 to - 1 range. Default is 0. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
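Beyond the keyword-argument change already covered in this guide, the operation returned by `batch_predict` still needs to be waited on; the predictions themselves land in the output location, not in the response. A sketch with placeholder GCS URIs and model name:

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()

name = "projects/my-project/locations/us-central1/models/MODEL_ID"  # placeholder
input_config = {"gcs_source": {"input_uris": ["gs://my-bucket/inputs.csv"]}}
output_config = {"gcs_destination": {"output_uri_prefix": "gs://my-bucket/results/"}}

operation = client.batch_predict(
    name=name,
    input_config=input_config,
    output_config=output_config,
    params={},
)
# BatchPredictResult mostly records where output was written; the rows
# themselves are in the gcs_destination above. Batch jobs can be slow.
result = operation.result(timeout=3600)
```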
- if "batch_predict" not in self._inner_api_calls: - self._inner_api_calls[ - "batch_predict" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.batch_predict, - default_retry=self._method_configs["BatchPredict"].retry, - default_timeout=self._method_configs["BatchPredict"].timeout, - client_info=self._client_info, - ) - - request = prediction_service_pb2.BatchPredictRequest( - name=name, - input_config=input_config, - output_config=output_config, - params=params, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["batch_predict"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - prediction_service_pb2.BatchPredictResult, - metadata_type=proto_operations_pb2.OperationMetadata, - ) diff --git a/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py b/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py deleted file mode 100644 index 76c85878..00000000 --- a/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py +++ /dev/null @@ -1,39 +0,0 @@ -config = { - "interfaces": { - "google.cloud.automl.v1beta1.PredictionService": { - "retry_codes": {"no_retry_codes": [], "no_retry_1_codes": []}, - "retry_params": { - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - }, - "methods": { - "Predict": { - "timeout_millis": 60000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "BatchPredict": { - "timeout_millis": 20000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - }, - } - } -} diff --git a/google/cloud/automl_v1beta1/gapic/transports/__init__.py b/google/cloud/automl_v1beta1/gapic/transports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py deleted file mode 100644 index 8dcd0ec1..00000000 --- a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py +++ /dev/null @@ -1,477 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.automl_v1beta1.proto import service_pb2_grpc - - -class AutoMlGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.automl.v1beta1 AutoMl API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="automl.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "auto_ml_stub": service_pb2_grpc.AutoMlStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="automl.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. - - Creates a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].CreateDataset - - @property - def get_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. 
- - Gets a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetDataset - - @property - def list_datasets(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. - - Lists datasets in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ListDatasets - - @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - - Updates a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].UpdateDataset - - @property - def delete_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeleteDataset - - @property - def import_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ImportData - - @property - def export_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - - Exports dataset's data to the provided output location. Returns an - empty response in the ``response`` field when it completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ExportData - - @property - def get_annotation_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_annotation_spec`. - - Gets an annotation spec. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetAnnotationSpec - - @property - def get_table_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_table_spec`. - - Gets a table spec. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetTableSpec - - @property - def list_table_specs(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_table_specs`. - - Lists table specs in a dataset. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ListTableSpecs - - @property - def update_table_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_table_spec`. - - Updates a table spec. 
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].UpdateTableSpec
-
- @property
- def get_column_spec(self):
- """Return the gRPC stub for :meth:`AutoMlClient.get_column_spec`.
-
- Gets a column spec.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].GetColumnSpec
-
- @property
- def list_column_specs(self):
- """Return the gRPC stub for :meth:`AutoMlClient.list_column_specs`.
-
- Lists column specs in a table spec.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].ListColumnSpecs
-
- @property
- def update_column_spec(self):
- """Return the gRPC stub for :meth:`AutoMlClient.update_column_spec`.
-
- Updates a column spec.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].UpdateColumnSpec
-
- @property
- def create_model(self):
- """Return the gRPC stub for :meth:`AutoMlClient.create_model`.
-
- Creates a model. Returns a Model in the ``response`` field when it
- completes. When you create a model, several model evaluations are
- created for it: a global evaluation, and one evaluation for each
- annotation spec.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].CreateModel
-
- @property
- def get_model(self):
- """Return the gRPC stub for :meth:`AutoMlClient.get_model`.
-
- Gets a model.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].GetModel
-
- @property
- def list_models(self):
- """Return the gRPC stub for :meth:`AutoMlClient.list_models`.
-
- Lists models.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].ListModels
-
- @property
- def delete_model(self):
- """Return the gRPC stub for :meth:`AutoMlClient.delete_model`.
-
- Deletes a model. Returns ``google.protobuf.Empty`` in the
- ``response`` field when it completes, and ``delete_details`` in the
- ``metadata`` field.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].DeleteModel
-
- @property
- def deploy_model(self):
- """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`.
-
- Deploys a model. If a model is already deployed, deploying it with
- the same parameters has no effect. Deploying with different parameters
- (e.g. changing ``node_number``) will reset the deployment state without
- pausing the model's availability.
-
- Only applicable for Text Classification, Image Object Detection,
- Tables, and Image Segmentation; all other domains manage deployment
- automatically.
-
- Returns an empty response in the ``response`` field when it completes.
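A minimal sketch of driving the `DeployModel` RPC above through the removed v1beta1 client; the project and model IDs are placeholders:

```py
# Sketch only (v1beta1): deploy a model and block on the returned
# long-running operation. IDs below are illustrative placeholders.
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
model_path = client.model_path("my-project", "us-central1", "my-model-id")

operation = client.deploy_model(model_path)
operation.result()  # Empty response once deployment completes.

# Re-deploying with identical parameters is a no-op; deploying with a
# changed node_number resets the deployment without pausing availability.
```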
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].DeployModel
-
- @property
- def undeploy_model(self):
- """Return the gRPC stub for :meth:`AutoMlClient.undeploy_model`.
-
- Undeploys a model. If the model is not deployed this method has no
- effect.
-
- Only applicable for Text Classification, Image Object Detection and
- Tables; all other domains manage deployment automatically.
-
- Returns an empty response in the ``response`` field when it completes.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].UndeployModel
-
- @property
- def export_model(self):
- """Return the gRPC stub for :meth:`AutoMlClient.export_model`.
-
- Exports a trained, "export-able" model to a user-specified Google
- Cloud Storage location. A model is considered export-able if and only if
- it has an export format defined for it in
-
- ``ModelExportOutputConfig``.
-
- Returns an empty response in the ``response`` field when it completes.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].ExportModel
-
- @property
- def export_evaluated_examples(self):
- """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`.
-
- Exports examples on which the model was evaluated (i.e. which were
- in the TEST set of the dataset the model was created from), together
- with their ground truth annotations and the annotations created
- (predicted) by the model. The examples, ground truth and predictions are
- exported in the state they were at the moment the model was evaluated.
-
- This export is available only for 30 days after the model evaluation is
- created.
-
- Currently only available for Tables.
-
- Returns an empty response in the ``response`` field when it completes.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].ExportEvaluatedExamples
-
- @property
- def get_model_evaluation(self):
- """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`.
-
- Gets a model evaluation.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].GetModelEvaluation
-
- @property
- def list_model_evaluations(self):
- """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`.
-
- Lists model evaluations.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["auto_ml_stub"].ListModelEvaluations
diff --git a/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py
deleted file mode 100644
index 6f2b37b1..00000000
--- a/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc - - -class PredictionServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.automl.v1beta1 PredictionService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="automl.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="automl.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. 
-
- Returns:
- grpc.Channel: A gRPC channel object.
- """
- return self._channel
-
- @property
- def predict(self):
- """Return the gRPC stub for :meth:`PredictionServiceClient.predict`.
-
- Perform an online prediction. The prediction result will be directly
- returned in the response. Available for the following ML problems, and
- their expected request payloads:
-
- - Image Classification - Image in .JPEG, .GIF or .PNG format,
- image_bytes up to 30MB.
- - Image Object Detection - Image in .JPEG, .GIF or .PNG format,
- image_bytes up to 30MB.
- - Text Classification - TextSnippet, content up to 60,000 characters,
- UTF-8 encoded.
- - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8
- NFC encoded.
- - Translation - TextSnippet, content up to 25,000 characters, UTF-8
- encoded.
- - Tables - Row, with column values matching the columns of the model,
- up to 5MB. Not available for FORECASTING
-
- ``prediction_type``.
-
- - Text Sentiment - TextSnippet, content up to 500 characters, UTF-8
- encoded.
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["prediction_service_stub"].Predict
-
- @property
- def batch_predict(self):
- """Return the gRPC stub for :meth:`PredictionServiceClient.batch_predict`.
-
- Perform a batch prediction. Unlike the online ``Predict``, the batch
- prediction result won't be immediately available in the response.
- Instead, a long-running operation object is returned. The user can poll
- the operation result via the ``GetOperation`` method. Once the operation
- is done, ``BatchPredictResult`` is returned in the ``response`` field.
- Available for the following ML problems:
-
- - Image Classification
- - Image Object Detection
- - Video Classification
- - Video Object Tracking
- - Text Extraction
- - Tables
-
- Returns:
- Callable: A callable which accepts the appropriate
- deserialized request object and returns a
- deserialized response object.
- """
- return self._stubs["prediction_service_stub"].BatchPredict
diff --git a/google/cloud/automl_v1beta1/proto/__init__.py b/google/cloud/automl_v1beta1/proto/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload.proto b/google/cloud/automl_v1beta1/proto/annotation_payload.proto
deleted file mode 100644
index f62bb269..00000000
--- a/google/cloud/automl_v1beta1/proto/annotation_payload.proto
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
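A minimal sketch of an online call against the `Predict` stub documented above, for a text classification model; the IDs and content are placeholders:

```py
# Sketch only (v1beta1): online prediction for a text classification model.
# TextSnippet content is capped at 60,000 characters for this problem type.
from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()
model_path = client.model_path("my-project", "us-central1", "my-model-id")

payload = {
    "text_snippet": {
        "content": "The product arrived quickly and works well.",
        "mime_type": "text/plain",
    }
}

response = client.predict(model_path, payload)
for annotation in response.payload:
    print(annotation.display_name, annotation.classification.score)
```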
- -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/cloud/automl/v1beta1/detection.proto"; -import "google/cloud/automl/v1beta1/tables.proto"; -import "google/cloud/automl/v1beta1/text_extraction.proto"; -import "google/cloud/automl/v1beta1/text_sentiment.proto"; -import "google/cloud/automl/v1beta1/translation.proto"; -import "google/protobuf/any.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Contains annotation information that is relevant to AutoML. -message AnnotationPayload { - // Output only . Additional information about the annotation - // specific to the AutoML domain. - oneof detail { - // Annotation details for translation. - TranslationAnnotation translation = 2; - - // Annotation details for content or image classification. - ClassificationAnnotation classification = 3; - - // Annotation details for image object detection. - ImageObjectDetectionAnnotation image_object_detection = 4; - - // Annotation details for video classification. - // Returned for Video Classification predictions. - VideoClassificationAnnotation video_classification = 9; - - // Annotation details for video object tracking. - VideoObjectTrackingAnnotation video_object_tracking = 8; - - // Annotation details for text extraction. - TextExtractionAnnotation text_extraction = 6; - - // Annotation details for text sentiment. - TextSentimentAnnotation text_sentiment = 7; - - // Annotation details for Tables. - TablesAnnotation tables = 10; - } - - // Output only . The resource ID of the annotation spec that - // this annotation pertains to. The annotation spec comes from either an - // ancestor dataset, or the dataset that was used to train the model in use. - string annotation_spec_id = 1; - - // Output only. The value of - // [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] - // when the model was trained. Because this field returns a value at model - // training time, for different models trained using the same dataset, the - // returned value could be different as model owner could update the - // `display_name` between any two model training. - string display_name = 5; -} diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py deleted file mode 100644 index bf06fb77..00000000 --- a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py +++ /dev/null @@ -1,417 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
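The `AnnotationPayload` message above keeps its domain-specific details in a `detail` oneof, so at most one of those fields is ever set. A short sketch of the standard protobuf oneof accessors on the generated class that the module below provided:

```py
# Sketch only: standard protobuf oneof semantics on the generated
# AnnotationPayload class from the (now removed) pb2 module below.
from google.cloud.automl_v1beta1.proto import annotation_payload_pb2

payload = annotation_payload_pb2.AnnotationPayload(annotation_spec_id="12345")
payload.classification.score = 0.93  # Assigning a member selects the oneof.
assert payload.WhichOneof("detail") == "classification"

# Selecting a different member clears the previous one.
payload.text_sentiment.SetInParent()
assert payload.WhichOneof("detail") == "text_sentiment"
assert not payload.HasField("classification")
```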
-# source: google/cloud/automl_v1beta1/proto/annotation_payload.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - detection_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - tables_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_extraction_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_sentiment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - translation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2, -) -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/annotation_payload.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x05\n\x11\x41nnotationPayload\x12I\n\x0btranslation\x18\x02 \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TranslationAnnotationH\x00\x12O\n\x0e\x63lassification\x18\x03 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotationH\x00\x12]\n\x16image_object_detection\x18\x04 \x01(\x0b\x32;.google.cloud.automl.v1beta1.ImageObjectDetectionAnnotationH\x00\x12Z\n\x14video_classification\x18\t \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoClassificationAnnotationH\x00\x12[\n\x15video_object_tracking\x18\x08 \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoObjectTrackingAnnotationH\x00\x12P\n\x0ftext_extraction\x18\x06 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TextExtractionAnnotationH\x00\x12N\n\x0etext_sentiment\x18\x07 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.TextSentimentAnnotationH\x00\x12?\n\x06tables\x18\n \x01(\x0b\x32-.google.cloud.automl.v1beta1.TablesAnnotationH\x00\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x05 
\x01(\tB\x08\n\x06\x64\x65tailB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2.DESCRIPTOR, - google_dot_protobuf_dot_any__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_ANNOTATIONPAYLOAD = _descriptor.Descriptor( - name="AnnotationPayload", - full_name="google.cloud.automl.v1beta1.AnnotationPayload", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="translation", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.translation", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="classification", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.classification", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_object_detection", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.image_object_detection", - index=2, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_classification", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.video_classification", - index=3, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_object_tracking", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.video_object_tracking", - index=4, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_extraction", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.text_extraction", - index=5, - 
number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_sentiment", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.text_sentiment", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tables", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.tables", - index=7, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="annotation_spec_id", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.annotation_spec_id", - index=8, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.display_name", - index=9, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="detail", - full_name="google.cloud.automl.v1beta1.AnnotationPayload.detail", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=470, - serialized_end=1212, -) - -_ANNOTATIONPAYLOAD.fields_by_name[ - "translation" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2._TRANSLATIONANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "classification" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "image_object_detection" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2._IMAGEOBJECTDETECTIONANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "video_classification" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._VIDEOCLASSIFICATIONANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "video_object_tracking" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2._VIDEOOBJECTTRACKINGANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "text_extraction" -].message_type = ( - 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2._TEXTEXTRACTIONANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "text_sentiment" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2._TEXTSENTIMENTANNOTATION -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "tables" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2._TABLESANNOTATION -) -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["translation"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "translation" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["classification"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "classification" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["image_object_detection"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "image_object_detection" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["video_classification"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "video_classification" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["video_object_tracking"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "video_object_tracking" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["text_extraction"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "text_extraction" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["text_sentiment"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "text_sentiment" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append( - _ANNOTATIONPAYLOAD.fields_by_name["tables"] -) -_ANNOTATIONPAYLOAD.fields_by_name[ - "tables" -].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"] -DESCRIPTOR.message_types_by_name["AnnotationPayload"] = _ANNOTATIONPAYLOAD -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -AnnotationPayload = _reflection.GeneratedProtocolMessageType( - "AnnotationPayload", - (_message.Message,), - { - "DESCRIPTOR": _ANNOTATIONPAYLOAD, - "__module__": "google.cloud.automl_v1beta1.proto.annotation_payload_pb2", - "__doc__": """Contains annotation information that is relevant to AutoML. - - Attributes: - detail: - Output only . Additional information about the annotation - specific to the AutoML domain. - translation: - Annotation details for translation. - classification: - Annotation details for content or image classification. - image_object_detection: - Annotation details for image object detection. - video_classification: - Annotation details for video classification. Returned for - Video Classification predictions. - video_object_tracking: - Annotation details for video object tracking. - text_extraction: - Annotation details for text extraction. - text_sentiment: - Annotation details for text sentiment. - tables: - Annotation details for Tables. - annotation_spec_id: - Output only . 
The resource ID of the annotation spec that this - annotation pertains to. The annotation spec comes from either - an ancestor dataset, or the dataset that was used to train the - model in use. - display_name: - Output only. The value of [display_name][google.cloud.automl.v - 1beta1.AnnotationSpec.display_name] when the model was - trained. Because this field returns a value at model training - time, for different models trained using the same dataset, the - returned value could be different as model owner could update - the ``display_name`` between any two model training. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationPayload) - }, -) -_sym_db.RegisterMessage(AnnotationPayload) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec.proto b/google/cloud/automl_v1beta1/proto/annotation_spec.proto deleted file mode 100644 index d9df07ee..00000000 --- a/google/cloud/automl_v1beta1/proto/annotation_spec.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/resource.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// A definition of an annotation spec. -message AnnotationSpec { - option (google.api.resource) = { - type: "automl.googleapis.com/AnnotationSpec" - pattern: "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}" - }; - - // Output only. Resource name of the annotation spec. - // Form: - // - // 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' - string name = 1; - - // Required. The name of the annotation spec to show in the interface. The name can be - // up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`. - string display_name = 2; - - // Output only. The number of examples in the parent dataset - // labeled by the annotation spec. 
- int32 example_count = 9; -} diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py deleted file mode 100644 index c259a290..00000000 --- a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py +++ /dev/null @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/annotation_spec.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/annotation_spec.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/annotation_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto"\xd6\x01\n\x0e\x41nnotationSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x15\n\rexample_count\x18\t \x01(\x05:\x88\x01\xea\x41\x84\x01\n$automl.googleapis.com/AnnotationSpec\x12\\projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_ANNOTATIONSPEC = _descriptor.Descriptor( - name="AnnotationSpec", - full_name="google.cloud.automl.v1beta1.AnnotationSpec", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.AnnotationSpec.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.AnnotationSpec.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="example_count", - full_name="google.cloud.automl.v1beta1.AnnotationSpec.example_count", - index=2, - number=9, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - 
default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\352A\204\001\n$automl.googleapis.com/AnnotationSpec\022\\projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=146, - serialized_end=360, -) - -DESCRIPTOR.message_types_by_name["AnnotationSpec"] = _ANNOTATIONSPEC -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -AnnotationSpec = _reflection.GeneratedProtocolMessageType( - "AnnotationSpec", - (_message.Message,), - { - "DESCRIPTOR": _ANNOTATIONSPEC, - "__module__": "google.cloud.automl_v1beta1.proto.annotation_spec_pb2", - "__doc__": """A definition of an annotation spec. - - Attributes: - name: - Output only. Resource name of the annotation spec. Form: ‘pro - jects/{project_id}/locations/{location_id}/datasets/{dataset_i - d}/annotationSpecs/{annotation_spec_id}’ - display_name: - Required. The name of the annotation spec to show in the - interface. The name can be up to 32 characters long and must - match the regexp ``[a-zA-Z0-9_]+``. - example_count: - Output only. The number of examples in the parent dataset - labeled by the annotation spec. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationSpec) - }, -) -_sym_db.RegisterMessage(AnnotationSpec) - - -DESCRIPTOR._options = None -_ANNOTATIONSPEC._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/classification.proto b/google/cloud/automl_v1beta1/proto/classification.proto deleted file mode 100644 index 0594d01e..00000000 --- a/google/cloud/automl_v1beta1/proto/classification.proto +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/temporal.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_outer_classname = "ClassificationProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Type of the classification problem. -enum ClassificationType { - // An un-set value of this enum. 
- CLASSIFICATION_TYPE_UNSPECIFIED = 0;
-
- // At most one label is allowed per example.
- MULTICLASS = 1;
-
- // Multiple labels are allowed for one example.
- MULTILABEL = 2;
-}
-
-// Contains annotation details specific to classification.
-message ClassificationAnnotation {
- // Output only. A confidence estimate between 0.0 and 1.0. A higher value
- // means greater confidence that the annotation is positive. If a user
- // approves an annotation as negative or positive, the score value remains
- // unchanged. If a user creates an annotation, the score is 0 for negative or
- // 1 for positive.
- float score = 1;
-}
-
-// Contains annotation details specific to video classification.
-message VideoClassificationAnnotation {
- // Output only. Expresses the type of video classification. Possible values:
- //
- // * `segment` - Classification done on a user-specified time segment
- // of a video. The AnnotationSpec is reported as present in that time
- // segment if it is present in any part of it. The video ML model
- // evaluations are done only for this type of classification.
- //
- // * `shot` - Shot-level classification.
- // AutoML Video Intelligence determines the boundaries
- // for each camera shot in the entire segment of the video that the user
- // specified in the request configuration. AutoML Video Intelligence
- // then returns labels and their confidence scores for each detected
- // shot, along with the start and end time of the shot.
- // WARNING: Model evaluation is not done for this classification type;
- // its quality depends on the training data, but there are no
- // metrics provided to describe that quality.
- //
- // * `1s_interval` - AutoML Video Intelligence returns labels and their
- // confidence scores for each second of the entire segment of the video
- // that the user specified in the request configuration.
- // WARNING: Model evaluation is not done for this classification type;
- // its quality depends on the training data, but there are no
- // metrics provided to describe that quality.
- string type = 1;
-
- // Output only. The classification details of this annotation.
- ClassificationAnnotation classification_annotation = 2;
-
- // Output only. The time segment of the video to which the
- // annotation applies.
- TimeSegment time_segment = 3;
-}
-
-// Model evaluation metrics for classification problems.
-// Note: For Video Classification these metrics only describe quality of the
-// Video Classification predictions of "segment_classification" type.
-message ClassificationEvaluationMetrics {
- // Metrics for a single confidence threshold.
- message ConfidenceMetricsEntry {
- // Output only. Metrics are computed with an assumption that the model
- // never returns predictions with score lower than this value.
- float confidence_threshold = 1;
-
- // Output only. Metrics are computed with an assumption that the model
- // always returns at most this many predictions (ordered by their score,
- // descending), but they all still need to meet the confidence_threshold.
- int32 position_threshold = 14;
-
- // Output only. Recall (True Positive Rate) for the given confidence
- // threshold.
- float recall = 2;
-
- // Output only. Precision for the given confidence threshold.
- float precision = 3;
-
- // Output only. False Positive Rate for the given confidence threshold.
- float false_positive_rate = 8;
-
- // Output only. The harmonic mean of recall and precision.
- float f1_score = 4;
-
- // Output only.
The Recall (True Positive Rate) when only considering the
- // label that has the highest prediction score and not below the confidence
- // threshold for each example.
- float recall_at1 = 5;
-
- // Output only. The precision when only considering the label that has the
- // highest prediction score and not below the confidence threshold for each
- // example.
- float precision_at1 = 6;
-
- // Output only. The False Positive Rate when only considering the label that
- // has the highest prediction score and not below the confidence threshold
- // for each example.
- float false_positive_rate_at1 = 9;
-
- // Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
- float f1_score_at1 = 7;
-
- // Output only. The number of model-created labels that match a ground truth
- // label.
- int64 true_positive_count = 10;
-
- // Output only. The number of model-created labels that do not match a
- // ground truth label.
- int64 false_positive_count = 11;
-
- // Output only. The number of ground truth labels that are not matched
- // by a model-created label.
- int64 false_negative_count = 12;
-
- // Output only. The number of labels that were not created by the model,
- // but that, had they been created, would not have matched a ground truth
- // label.
- int64 true_negative_count = 13;
- }
-
- // Confusion matrix of the model running the classification.
- message ConfusionMatrix {
- // Output only. A row in the confusion matrix.
- message Row {
- // Output only. Value of the specific cell in the confusion matrix.
- // The number of values each row has (i.e. the length of the row) is equal
- // to the length of the `annotation_spec_id` field or, if that one is not
- // populated, length of the [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field.
- repeated int32 example_count = 1;
- }
-
- // Output only. IDs of the annotation specs used in the confusion matrix.
- // For Tables CLASSIFICATION
- //
- // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
- // only the list of [annotation_spec_display_name-s][] is populated.
- repeated string annotation_spec_id = 1;
-
- // Output only. Display name of the annotation specs used in the confusion
- // matrix, as they were at the moment of the evaluation. For Tables
- // CLASSIFICATION
- //
- // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type],
- // distinct values of the target column at the moment of the model
- // evaluation are populated here.
- repeated string display_name = 3;
-
- // Output only. Rows in the confusion matrix. The number of rows is equal to
- // the size of `annotation_spec_id`.
- // `row[i].example_count[j]` is the number of examples that have ground
- // truth of the `annotation_spec_id[i]` and are predicted as
- // `annotation_spec_id[j]` by the model being evaluated.
- repeated Row row = 2;
- }
-
- // Output only. The Area Under Precision-Recall Curve metric. Micro-averaged
- // for the overall evaluation.
- float au_prc = 1;
-
- // Output only. The Area Under Precision-Recall Curve metric based on priors.
- // Micro-averaged for the overall evaluation.
- // Deprecated.
- float base_au_prc = 2 [deprecated = true];
-
- // Output only. The Area Under Receiver Operating Characteristic curve metric.
- // Micro-averaged for the overall evaluation. - float au_roc = 6; - - // Output only. The Log Loss metric. - float log_loss = 7; - - // Output only. Metrics for each confidence_threshold in - // 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and - // position_threshold = INT32_MAX_VALUE. - // ROC and precision-recall curves, and other aggregated metrics are derived - // from them. The confidence metrics entries may also be supplied for - // additional values of position_threshold, but from these no aggregated - // metrics are computed. - repeated ConfidenceMetricsEntry confidence_metrics_entry = 3; - - // Output only. Confusion matrix of the evaluation. - // Only set for MULTICLASS classification problems where number - // of labels is no more than 10. - // Only set for model level evaluation, not for evaluation per label. - ConfusionMatrix confusion_matrix = 4; - - // Output only. The annotation spec ids used for this evaluation. - repeated string annotation_spec_id = 5; -} diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2.py b/google/cloud/automl_v1beta1/proto/classification_pb2.py deleted file mode 100644 index 9b38e2db..00000000 --- a/google/cloud/automl_v1beta1/proto/classification_pb2.py +++ /dev/null @@ -1,1027 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/classification.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/classification.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\023ClassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/classification.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/api/annotations.proto")\n\x18\x43lassificationAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02"\xc7\x01\n\x1dVideoClassificationAnnotation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12X\n\x19\x63lassification_annotation\x18\x02 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotation\x12>\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 \x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 
\x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - -_CLASSIFICATIONTYPE = _descriptor.EnumDescriptor( - name="ClassificationType", - full_name="google.cloud.automl.v1beta1.ClassificationType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="CLASSIFICATION_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MULTICLASS", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MULTILABEL", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1352, - serialized_end=1441, -) -_sym_db.RegisterEnumDescriptor(_CLASSIFICATIONTYPE) - -ClassificationType = enum_type_wrapper.EnumTypeWrapper(_CLASSIFICATIONTYPE) -CLASSIFICATION_TYPE_UNSPECIFIED = 0 -MULTICLASS = 1 -MULTILABEL = 2 - - -_CLASSIFICATIONANNOTATION = _descriptor.Descriptor( - name="ClassificationAnnotation", - full_name="google.cloud.automl.v1beta1.ClassificationAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="score", - full_name="google.cloud.automl.v1beta1.ClassificationAnnotation.score", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - 
nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=167, - serialized_end=208, -) - - -_VIDEOCLASSIFICATIONANNOTATION = _descriptor.Descriptor( - name="VideoClassificationAnnotation", - full_name="google.cloud.automl.v1beta1.VideoClassificationAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="type", - full_name="google.cloud.automl.v1beta1.VideoClassificationAnnotation.type", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="classification_annotation", - full_name="google.cloud.automl.v1beta1.VideoClassificationAnnotation.classification_annotation", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_segment", - full_name="google.cloud.automl.v1beta1.VideoClassificationAnnotation.time_segment", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=211, - serialized_end=410, -) - - -_CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY = _descriptor.Descriptor( - name="ConfidenceMetricsEntry", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="confidence_threshold", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.confidence_threshold", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="position_threshold", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.position_threshold", - index=1, - number=14, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recall", - 
full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall", - index=2, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="precision", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision", - index=3, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_positive_rate", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.false_positive_rate", - index=4, - number=8, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="f1_score", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.f1_score", - index=5, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recall_at1", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1", - index=6, - number=5, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="precision_at1", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1", - index=7, - number=6, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_positive_rate_at1", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.false_positive_rate_at1", - index=8, - number=9, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="f1_score_at1", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.f1_score_at1", - index=9, - number=7, - type=2, - cpp_type=6, - 
label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_positive_count", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.true_positive_count", - index=10, - number=10, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_positive_count", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.false_positive_count", - index=11, - number=11, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_negative_count", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.false_negative_count", - index=12, - number=12, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_negative_count", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.true_negative_count", - index=13, - number=13, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=775, - serialized_end=1155, -) - -_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW = _descriptor.Descriptor( - name="Row", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="example_count", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row.example_count", - index=0, - number=1, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1322, - serialized_end=1350, -) - -_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX = _descriptor.Descriptor( - 
name="ConfusionMatrix", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="annotation_spec_id", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.annotation_spec_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name", - index=1, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.row", - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1158, - serialized_end=1350, -) - -_CLASSIFICATIONEVALUATIONMETRICS = _descriptor.Descriptor( - name="ClassificationEvaluationMetrics", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="au_prc", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.au_prc", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="base_au_prc", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.base_au_prc", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="au_roc", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.au_roc", - index=2, - number=6, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="log_loss", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.log_loss", - index=3, - number=7, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="confidence_metrics_entry", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.confidence_metrics_entry", - index=4, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="confusion_matrix", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.confusion_matrix", - index=5, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="annotation_spec_id", - full_name="google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.annotation_spec_id", - index=6, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=413, - serialized_end=1350, -) - -_VIDEOCLASSIFICATIONANNOTATION.fields_by_name[ - "classification_annotation" -].message_type = _CLASSIFICATIONANNOTATION -_VIDEOCLASSIFICATIONANNOTATION.fields_by_name[ - "time_segment" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2._TIMESEGMENT -) -_CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY.containing_type = ( - _CLASSIFICATIONEVALUATIONMETRICS -) -_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW.containing_type = ( - _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX -) -_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX.fields_by_name[ - "row" -].message_type = _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW -_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX.containing_type = ( - _CLASSIFICATIONEVALUATIONMETRICS -) -_CLASSIFICATIONEVALUATIONMETRICS.fields_by_name[ - "confidence_metrics_entry" -].message_type = _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY -_CLASSIFICATIONEVALUATIONMETRICS.fields_by_name[ - "confusion_matrix" -].message_type = _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX -DESCRIPTOR.message_types_by_name["ClassificationAnnotation"] = _CLASSIFICATIONANNOTATION -DESCRIPTOR.message_types_by_name[ - "VideoClassificationAnnotation" -] = _VIDEOCLASSIFICATIONANNOTATION -DESCRIPTOR.message_types_by_name[ - 
"ClassificationEvaluationMetrics" -] = _CLASSIFICATIONEVALUATIONMETRICS -DESCRIPTOR.enum_types_by_name["ClassificationType"] = _CLASSIFICATIONTYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ClassificationAnnotation = _reflection.GeneratedProtocolMessageType( - "ClassificationAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _CLASSIFICATIONANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Contains annotation details specific to classification. - - Attributes: - score: - Output only. A confidence estimate between 0.0 and 1.0. A - higher value means greater confidence that the annotation is - positive. If a user approves an annotation as negative or - positive, the score value remains unchanged. If a user creates - an annotation, the score is 0 for negative or 1 for positive. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationAnnotation) - }, -) -_sym_db.RegisterMessage(ClassificationAnnotation) - -VideoClassificationAnnotation = _reflection.GeneratedProtocolMessageType( - "VideoClassificationAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOCLASSIFICATIONANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Contains annotation details specific to video classification. - - Attributes: - type: - Output only. Expresses the type of video classification. - Possible values: - ``segment`` - Classification done on a - specified by user time segment of a video. AnnotationSpec - is answered to be present in that time segment, if it is - present in any part of it. The video ML model evaluations - are done only for this type of classification. - ``shot``- - Shot-level classification. AutoML Video Intelligence - determines the boundaries for each camera shot in the entire - segment of the video that user specified in the request - configuration. AutoML Video Intelligence then returns - labels and their confidence scores for each detected shot, - along with the start and end time of the shot. WARNING: - Model evaluation is not done for this classification type, - the quality of it depends on training data, but there are no - metrics provided to describe that quality. - ``1s_interval`` - - AutoML Video Intelligence returns labels and their - confidence scores for each second of the entire segment of the - video that user specified in the request configuration. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on training - data, but there are no metrics provided to describe that - quality. - classification_annotation: - Output only . The classification details of this annotation. - time_segment: - Output only . The time segment of the video to which the - annotation applies. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationAnnotation) - }, -) -_sym_db.RegisterMessage(VideoClassificationAnnotation) - -ClassificationEvaluationMetrics = _reflection.GeneratedProtocolMessageType( - "ClassificationEvaluationMetrics", - (_message.Message,), - { - "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( - "ConfidenceMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Metrics for a single confidence threshold. - - Attributes: - confidence_threshold: - Output only. 
Metrics are computed with an assumption that the - model never returns predictions with score lower than this - value. - position_threshold: - Output only. Metrics are computed with an assumption that the - model always returns at most this many predictions (ordered by - their score, descendingly), but they all still need to meet - the confidence_threshold. - recall: - Output only. Recall (True Positive Rate) for the given - confidence threshold. - precision: - Output only. Precision for the given confidence threshold. - false_positive_rate: - Output only. False Positive Rate for the given confidence - threshold. - f1_score: - Output only. The harmonic mean of recall and precision. - recall_at1: - Output only. The Recall (True Positive Rate) when only - considering the label that has the highest prediction score - and not below the confidence threshold for each example. - precision_at1: - Output only. The precision when only considering the label - that has the highest prediction score and not below the - confidence threshold for each example. - false_positive_rate_at1: - Output only. The False Positive Rate when only considering the - label that has the highest prediction score and not below the - confidence threshold for each example. - f1_score_at1: - Output only. The harmonic mean of [recall_at1][google.cloud.au - toml.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetrics - Entry.recall_at1] and [precision_at1][google.cloud.automl.v1be - ta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.pre - cision_at1]. - true_positive_count: - Output only. The number of model created labels that match a - ground truth label. - false_positive_count: - Output only. The number of model created labels that do not - match a ground truth label. - false_negative_count: - Output only. The number of ground truth labels that are not - matched by a model created label. - true_negative_count: - Output only. The number of labels that were not created by the - model, but if they would, they would not match a ground truth - label. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry) - }, - ), - "ConfusionMatrix": _reflection.GeneratedProtocolMessageType( - "ConfusionMatrix", - (_message.Message,), - { - "Row": _reflection.GeneratedProtocolMessageType( - "Row", - (_message.Message,), - { - "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, - "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Output only. A row in the confusion matrix. - - Attributes: - example_count: - Output only. Value of the specific cell in the confusion - matrix. The number of values each row has (i.e. the length of - the row) is equal to the length of the ``annotation_spec_id`` - field or, if that one is not populated, length of the [display - _name][google.cloud.automl.v1beta1.ClassificationEvaluationMet - rics.ConfusionMatrix.display_name] field. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row) - }, - ), - "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, - "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Confusion matrix of the model running the classification. - - Attributes: - annotation_spec_id: - Output only. IDs of the annotation specs used in the confusion - matrix. 
For Tables CLASSIFICATION [prediction_type][google.cl - oud.automl.v1beta1.TablesModelMetadata.prediction_type] only - list of [annotation_spec_display_name-s][] is populated. - display_name: - Output only. Display name of the annotation specs used in the - confusion matrix, as they were at the moment of the - evaluation. For Tables CLASSIFICATION [prediction_type-s][goo - gle.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], - distinct values of the target column at the moment of the - model evaluation are populated here. - row: - Output only. Rows in the confusion matrix. The number of rows - is equal to the size of ``annotation_spec_id``. - ``row[i].example_count[j]`` is the number of examples that - have ground truth of the ``annotation_spec_id[i]`` and are - predicted as ``annotation_spec_id[j]`` by the model being - evaluated. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix) - }, - ), - "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS, - "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Model evaluation metrics for classification problems. Note: For Video - Classification this metrics only describe quality of the Video - Classification predictions of “segment_classification” type. - - Attributes: - au_prc: - Output only. The Area Under Precision-Recall Curve metric. - Micro-averaged for the overall evaluation. - base_au_prc: - Output only. The Area Under Precision-Recall Curve metric - based on priors. Micro-averaged for the overall evaluation. - Deprecated. - au_roc: - Output only. The Area Under Receiver Operating Characteristic - curve metric. Micro-averaged for the overall evaluation. - log_loss: - Output only. The Log Loss metric. - confidence_metrics_entry: - Output only. Metrics for each confidence_threshold in - 0.00,0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and - position_threshold = INT32_MAX_VALUE. ROC and precision-recall - curves, and other aggregated metrics are derived from them. - The confidence metrics entries may also be supplied for - additional values of position_threshold, but from these no - aggregated metrics are computed. - confusion_matrix: - Output only. Confusion matrix of the evaluation. Only set for - MULTICLASS classification problems where number of labels is - no more than 10. Only set for model level evaluation, not for - evaluation per label. - annotation_spec_id: - Output only. The annotation spec ids used for this evaluation. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics) - }, -) -_sym_db.RegisterMessage(ClassificationEvaluationMetrics) -_sym_db.RegisterMessage(ClassificationEvaluationMetrics.ConfidenceMetricsEntry) -_sym_db.RegisterMessage(ClassificationEvaluationMetrics.ConfusionMatrix) -_sym_db.RegisterMessage(ClassificationEvaluationMetrics.ConfusionMatrix.Row) - - -DESCRIPTOR._options = None -_CLASSIFICATIONEVALUATIONMETRICS.fields_by_name["base_au_prc"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/classification_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/classification_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
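The relationship between the precomputed ratios and the raw counts in `ConfidenceMetricsEntry`, and the indexing convention of `ConfusionMatrix`, are easier to see in use. A minimal sketch, assuming `metrics` already holds a `ClassificationEvaluationMetrics` message obtained from a model evaluation (the variable name and the 0.5 threshold are illustrative; the field names are the ones defined above):

```py
# Pick the confidence metrics entry closest to a 0.5 threshold.
entry = min(
    metrics.confidence_metrics_entry,
    key=lambda e: abs(e.confidence_threshold - 0.5),
)

# precision, recall, and f1_score are precomputed, but they follow from the
# counts carried in the same entry (real code should guard against zero
# denominators):
tp = entry.true_positive_count
fp = entry.false_positive_count
fn = entry.false_negative_count
precision = tp / (tp + fp)  # == entry.precision
recall = tp / (tp + fn)     # == entry.recall
f1 = 2 * precision * recall / (precision + recall)  # harmonic mean == entry.f1_score

# Confusion matrix: row[i].example_count[j] counts examples whose ground
# truth is annotation_spec_id[i] and whose prediction is annotation_spec_id[j].
cm = metrics.confusion_matrix
labels = cm.display_name or cm.annotation_spec_id
for i, truth in enumerate(labels):
    for j, predicted in enumerate(labels):
        print(f"{truth} -> {predicted}: {cm.row[i].example_count[j]}")
```

diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/classification_pb2_grpc.py
deleted file mode 100644
index 07cb78fe..00000000
--- a/google/cloud/automl_v1beta1/proto/classification_pb2_grpc.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!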
-import grpc
diff --git a/google/cloud/automl_v1beta1/proto/column_spec.proto b/google/cloud/automl_v1beta1/proto/column_spec.proto
deleted file mode 100644
index 03389b8a..00000000
--- a/google/cloud/automl_v1beta1/proto/column_spec.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/api/resource.proto";
-import "google/cloud/automl/v1beta1/data_stats.proto";
-import "google/cloud/automl/v1beta1/data_types.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A representation of a column in a relational table. When listing them,
-// column specs are returned in the same order in which they were given on
-// import.
-// Used by:
-// *   Tables
-message ColumnSpec {
-  option (google.api.resource) = {
-    type: "automl.googleapis.com/ColumnSpec"
-    pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}"
-  };
-
-  // Identifies the table's column, and its correlation with the column this
-  // ColumnSpec describes.
-  message CorrelatedColumn {
-    // The column_spec_id of the correlated column, which belongs to the same
-    // table as the in-context column.
-    string column_spec_id = 1;
-
-    // Correlation between this and the in-context column.
-    CorrelationStats correlation_stats = 2;
-  }
-
-  // Output only. The resource name of the column specs.
-  // Form:
-  //
-  // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}`
-  string name = 1;
-
-  // The data type of elements stored in the column.
-  DataType data_type = 2;
-
-  // Output only. The name of the column to show in the interface. The name can
-  // be up to 100 characters long and can consist only of ASCII Latin letters
-  // A-Z and a-z, ASCII digits 0-9, underscores(_), and forward slashes(/), and
-  // must start with a letter or a digit.
-  string display_name = 3;
-
-  // Output only. Stats of the series of values in the column.
-  // This field may be stale, see the ancestor's
-  // Dataset.tables_dataset_metadata.stats_update_time field
-  // for the timestamp at which these stats were last updated.
-  DataStats data_stats = 4;
-
-  // Deprecated.
-  repeated CorrelatedColumn top_correlated_columns = 5;
-
-  // Used to perform consistent read-modify-write updates. If not set, a blind
-  // "overwrite" update happens.
- string etag = 6; -} diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py deleted file mode 100644 index b32f9826..00000000 --- a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/column_spec.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - data_stats_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - data_types_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__types__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/column_spec.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/column_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_types.proto\x1a\x1cgoogle/api/annotations.proto"\x9b\x04\n\nColumnSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\tdata_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\ndata_stats\x18\x04 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats\x12X\n\x16top_correlated_columns\x18\x05 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t\x1at\n\x10\x43orrelatedColumn\x12\x16\n\x0e\x63olumn_spec_id\x18\x01 \x01(\t\x12H\n\x11\x63orrelation_stats\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x94\x01\xea\x41\x90\x01\n automl.googleapis.com/ColumnSpec\x12lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__types__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_COLUMNSPEC_CORRELATEDCOLUMN = _descriptor.Descriptor( - name="CorrelatedColumn", - full_name="google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="column_spec_id", - 
full_name="google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn.column_spec_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="correlation_stats", - full_name="google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn.correlation_stats", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=518, - serialized_end=634, -) - -_COLUMNSPEC = _descriptor.Descriptor( - name="ColumnSpec", - full_name="google.cloud.automl.v1beta1.ColumnSpec", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.ColumnSpec.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="data_type", - full_name="google.cloud.automl.v1beta1.ColumnSpec.data_type", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.ColumnSpec.display_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="data_stats", - full_name="google.cloud.automl.v1beta1.ColumnSpec.data_stats", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="top_correlated_columns", - full_name="google.cloud.automl.v1beta1.ColumnSpec.top_correlated_columns", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor(
-            name="etag",
-            full_name="google.cloud.automl.v1beta1.ColumnSpec.etag",
-            index=5,
-            number=6,
-            type=9,
-            cpp_type=9,
-            label=1,
-            has_default_value=False,
-            default_value=b"".decode("utf-8"),
-            message_type=None,
-            enum_type=None,
-            containing_type=None,
-            is_extension=False,
-            extension_scope=None,
-            serialized_options=None,
-            file=DESCRIPTOR,
-            create_key=_descriptor._internal_create_key,
-        ),
-    ],
-    extensions=[],
-    nested_types=[_COLUMNSPEC_CORRELATEDCOLUMN,],
-    enum_types=[],
-    serialized_options=b"\352A\220\001\n automl.googleapis.com/ColumnSpec\022lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}",
-    is_extendable=False,
-    syntax="proto3",
-    extension_ranges=[],
-    oneofs=[],
-    serialized_start=246,
-    serialized_end=785,
-)
-
-_COLUMNSPEC_CORRELATEDCOLUMN.fields_by_name[
-    "correlation_stats"
-].message_type = (
-    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2._CORRELATIONSTATS
-)
-_COLUMNSPEC_CORRELATEDCOLUMN.containing_type = _COLUMNSPEC
-_COLUMNSPEC.fields_by_name[
-    "data_type"
-].message_type = (
-    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__types__pb2._DATATYPE
-)
-_COLUMNSPEC.fields_by_name[
-    "data_stats"
-].message_type = (
-    google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2._DATASTATS
-)
-_COLUMNSPEC.fields_by_name[
-    "top_correlated_columns"
-].message_type = _COLUMNSPEC_CORRELATEDCOLUMN
-DESCRIPTOR.message_types_by_name["ColumnSpec"] = _COLUMNSPEC
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-ColumnSpec = _reflection.GeneratedProtocolMessageType(
-    "ColumnSpec",
-    (_message.Message,),
-    {
-        "CorrelatedColumn": _reflection.GeneratedProtocolMessageType(
-            "CorrelatedColumn",
-            (_message.Message,),
-            {
-                "DESCRIPTOR": _COLUMNSPEC_CORRELATEDCOLUMN,
-                "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2",
-                "__doc__": """Identifies the table’s column, and its correlation with the column
-    this ColumnSpec describes.
-
-    Attributes:
-        column_spec_id:
-            The column_spec_id of the correlated column, which belongs to
-            the same table as the in-context column.
-        correlation_stats:
-            Correlation between this and the in-context column.
-    """,
-                # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn)
-            },
-        ),
-        "DESCRIPTOR": _COLUMNSPEC,
-        "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2",
-        "__doc__": """A representation of a column in a relational table. When listing them,
-    column specs are returned in the same order in which they were given
-    on import. Used by: \* Tables
-
-    Attributes:
-        name:
-            Output only. The resource name of the column specs. Form:
-            ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}``
-        data_type:
-            The data type of elements stored in the column.
-        display_name:
-            Output only. The name of the column to show in the interface.
-            The name can be up to 100 characters long and can consist only
-            of ASCII Latin letters A-Z and a-z, ASCII digits 0-9,
-            underscores(_), and forward slashes(/), and must start with a
-            letter or a digit.
-        data_stats:
-            Output only. Stats of the series of values in the column. This
-            field may be stale, see the ancestor’s
-            Dataset.tables_dataset_metadata.stats_update_time field for
-            the timestamp at which these stats were last updated.
-        top_correlated_columns:
-            Deprecated.
-        etag:
-            Used to perform consistent read-modify-write updates. If not
-            set, a blind “overwrite” update happens.
-    """,
-        # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ColumnSpec)
-    },
-)
-_sym_db.RegisterMessage(ColumnSpec)
-_sym_db.RegisterMessage(ColumnSpec.CorrelatedColumn)
-
-
-DESCRIPTOR._options = None
-_COLUMNSPEC._options = None
-# @@protoc_insertion_point(module_scope)
diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2_grpc.py
deleted file mode 100644
index 07cb78fe..00000000
--- a/google/cloud/automl_v1beta1/proto/column_spec_pb2_grpc.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-import grpc
diff --git a/google/cloud/automl_v1beta1/proto/data_items.proto b/google/cloud/automl_v1beta1/proto/data_items.proto
deleted file mode 100644
index 9b9187ad..00000000
--- a/google/cloud/automl_v1beta1/proto/data_items.proto
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.automl.v1beta1;
-
-import "google/cloud/automl/v1beta1/geometry.proto";
-import "google/cloud/automl/v1beta1/io.proto";
-import "google/cloud/automl/v1beta1/temporal.proto";
-import "google/cloud/automl/v1beta1/text_segment.proto";
-import "google/protobuf/any.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/struct.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
-option java_multiple_files = true;
-option java_package = "com.google.cloud.automl.v1beta1";
-option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
-option ruby_package = "Google::Cloud::AutoML::V1beta1";
-
-// A representation of an image.
-// Only images up to 30MB in size are supported.
-message Image {
-  // Input only. The data representing the image.
-  // For Predict calls [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, as other options are not
-  // currently supported by the prediction API. You can read the contents of an
-  // uploaded image by using the [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field.
-  oneof data {
-    // Image content represented as a stream of bytes.
-    // Note: As with all `bytes` fields, protocol buffers use a pure binary
-    // representation, whereas JSON representations use base64.
-    bytes image_bytes = 1;
-
-    // An input config specifying the content of the image.
-    InputConfig input_config = 6;
-  }
-
-  // Output only. HTTP URI to the thumbnail image.
-  string thumbnail_uri = 4;
-}
-
-// A representation of a text snippet.
-message TextSnippet {
-  // Required. The content of the text snippet as a string. Up to 250000
-  // characters long.
-  string content = 1;
-
-  // Optional. The format of [content][google.cloud.automl.v1beta1.TextSnippet.content]. Currently the only two allowed
-  // values are "text/html" and "text/plain". If left blank, the format is
-  // automatically determined from the type of the uploaded [content][google.cloud.automl.v1beta1.TextSnippet.content].
-  string mime_type = 2;
-
-  // Output only. HTTP URI where you can download the content.
-  string content_uri = 4;
-}
-
-// Message that describes the dimensions of a document.
-message DocumentDimensions {
-  // Unit of the document dimension.
-  enum DocumentDimensionUnit {
-    // Should not be used.
-    DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0;
-
-    // Document dimension is measured in inches.
-    INCH = 1;
-
-    // Document dimension is measured in centimeters.
-    CENTIMETER = 2;
-
-    // Document dimension is measured in points. 72 points = 1 inch.
-    POINT = 3;
-  }
-
-  // Unit of the dimension.
-  DocumentDimensionUnit unit = 1;
-
-  // Width value of the document, works together with the unit.
-  float width = 2;
-
-  // Height value of the document, works together with the unit.
-  float height = 3;
-}
-
-// A structured text document, e.g. a PDF.
-message Document {
-  // Describes the layout information of a [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the document.
-  message Layout {
-    // The type of TextSegment in the context of the original document.
-    enum TextSegmentType {
-      // Should not be used.
-      TEXT_SEGMENT_TYPE_UNSPECIFIED = 0;
-
-      // The text segment is a token, e.g. a word.
-      TOKEN = 1;
-
-      // The text segment is a paragraph.
-      PARAGRAPH = 2;
-
-      // The text segment is a form field.
-      FORM_FIELD = 3;
-
-      // The text segment is the name part of a form field. It will be treated
-      // as a child of another FORM_FIELD TextSegment if its span is a subspan
-      // of another TextSegment with type FORM_FIELD.
-      FORM_FIELD_NAME = 4;
-
-      // The text segment is the text content part of a form field. It will be
-      // treated as a child of another FORM_FIELD TextSegment if its span is a
-      // subspan of another TextSegment with type FORM_FIELD.
-      FORM_FIELD_CONTENTS = 5;
-
-      // The text segment is a whole table, including headers, and all rows.
-      TABLE = 6;
-
-      // The text segment is a table's headers. It will be treated as a child
-      // of another TABLE TextSegment if its span is a subspan of another
-      // TextSegment with type TABLE.
-      TABLE_HEADER = 7;
-
-      // The text segment is a row in a table. It will be treated as a child
-      // of another TABLE TextSegment if its span is a subspan of another
-      // TextSegment with type TABLE.
-      TABLE_ROW = 8;
-
-      // The text segment is a cell in a table. It will be treated as a child
-      // of another TABLE_ROW TextSegment if its span is a subspan of another
-      // TextSegment with type TABLE_ROW.
-      TABLE_CELL = 9;
-    }
-
-    // Text Segment that represents a segment in
-    // [document_text][google.cloud.automl.v1beta1.Document.document_text].
-    TextSegment text_segment = 1;
-
-    // Page number of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the original document, starts
-    // from 1.
-    int32 page_number = 2;
-
-    // The position of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the page.
-    // Contains exactly 4
-    //
-    // [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices]
-    // and they are connected by edges in the order provided, which will
-    // represent a rectangle parallel to the frame. The
-    // [NormalizedVertex-s][google.cloud.automl.v1beta1.NormalizedVertex] are
-    // relative to the page.
-    // Coordinates are based on top-left as point (0,0).
-    BoundingPoly bounding_poly = 3;
-
-    // The type of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the document.
-    TextSegmentType text_segment_type = 4;
-  }
-
-  // An input config specifying the content of the document.
-  DocumentInputConfig input_config = 1;
-
-  // The plain text version of this document.
-  TextSnippet document_text = 2;
-
-  // Describes the layout of the document.
-  // Sorted by [page_number][].
-  repeated Layout layout = 3;
-
-  // The dimensions of the page in the document.
-  DocumentDimensions document_dimensions = 4;
-
-  // Number of pages in the document.
-  int32 page_count = 5;
-}
-
-// A representation of a row in a relational table.
-message Row {
-  // The resource IDs of the column specs describing the columns of the row.
-  // If set, it must contain, but possibly in a different order, all input
-  // feature
-  //
-  // [column_spec_ids][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
-  // of the Model this row is being passed to.
-  // Note: The below `values` field must match the order of this field, if
-  // this field is set.
-  repeated string column_spec_ids = 2;
-
-  // Required. The values of the row cells, given in the same order as the
-  // column_spec_ids, or, if not set, then in the same order as input
-  // feature
-  //
-  // [column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
-  // of the Model this row is being passed to.
-  repeated google.protobuf.Value values = 3;
-}
-
-// Example data used for training or prediction.
-message ExamplePayload {
-  // Required. Input only. The example data.
-  oneof payload {
-    // Example image.
-    Image image = 1;
-
-    // Example text.
-    TextSnippet text_snippet = 2;
-
-    // Example document.
-    Document document = 4;
-
-    // Example relational table row.
-    Row row = 3;
-  }
-}
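The `ExamplePayload` oneof above is the shape of the `payload` argument to `PredictionServiceClient.predict`: exactly one of `image`, `text_snippet`, `document`, or `row` may be set. A hedged sketch of two payload dicts (the client libraries generally accept plain dicts in place of messages; the contents, IDs, and values here are made up):

```py
# Text classification: only the text_snippet member of the oneof is set.
text_payload = {
    "text_snippet": {"content": "A flaky test on CI", "mime_type": "text/plain"}
}

# Tables: values must line up with column_spec_ids when that field is set;
# otherwise they must follow the order of the model's input feature columns.
row_payload = {
    "row": {
        "column_spec_ids": ["123", "456"],  # illustrative column spec IDs
        "values": ["some text", 42],
    }
}
```

diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py
deleted file mode 100644
index 303eb85c..00000000
--- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py
+++ /dev/null
@@ -1,1089 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!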
-# source: google/cloud/automl_v1beta1/proto/data_items.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - geometry_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_segment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2, -) -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/data_items.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_items.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\x7f\n\x05Image\x12\x15\n\x0bimage_bytes\x18\x01 \x01(\x0cH\x00\x12@\n\x0cinput_config\x18\x06 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigH\x00\x12\x15\n\rthumbnail_uri\x18\x04 \x01(\tB\x06\n\x04\x64\x61ta"F\n\x0bTextSnippet\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x12\x13\n\x0b\x63ontent_uri\x18\x04 \x01(\t"\xef\x01\n\x12\x44ocumentDimensions\x12S\n\x04unit\x18\x01 \x01(\x0e\x32\x45.google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit\x12\r\n\x05width\x18\x02 \x01(\x02\x12\x0e\n\x06height\x18\x03 \x01(\x02"e\n\x15\x44ocumentDimensionUnit\x12\'\n#DOCUMENT_DIMENSION_UNIT_UNSPECIFIED\x10\x00\x12\x08\n\x04INCH\x10\x01\x12\x0e\n\nCENTIMETER\x10\x02\x12\t\n\x05POINT\x10\x03"\xf9\x05\n\x08\x44ocument\x12\x46\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.DocumentInputConfig\x12?\n\rdocument_text\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippet\x12<\n\x06layout\x18\x03 \x03(\x0b\x32,.google.cloud.automl.v1beta1.Document.Layout\x12L\n\x13\x64ocument_dimensions\x18\x04 \x01(\x0b\x32/.google.cloud.automl.v1beta1.DocumentDimensions\x12\x12\n\npage_count\x18\x05 \x01(\x05\x1a\xc3\x03\n\x06Layout\x12>\n\x0ctext_segment\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegment\x12\x13\n\x0bpage_number\x18\x02 \x01(\x05\x12@\n\rbounding_poly\x18\x03 
\x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12W\n\x11text_segment_type\x18\x04 \x01(\x0e\x32<.google.cloud.automl.v1beta1.Document.Layout.TextSegmentType"\xc8\x01\n\x0fTextSegmentType\x12!\n\x1dTEXT_SEGMENT_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05TOKEN\x10\x01\x12\r\n\tPARAGRAPH\x10\x02\x12\x0e\n\nFORM_FIELD\x10\x03\x12\x13\n\x0f\x46ORM_FIELD_NAME\x10\x04\x12\x17\n\x13\x46ORM_FIELD_CONTENTS\x10\x05\x12\t\n\x05TABLE\x10\x06\x12\x10\n\x0cTABLE_HEADER\x10\x07\x12\r\n\tTABLE_ROW\x10\x08\x12\x0e\n\nTABLE_CELL\x10\t"F\n\x03Row\x12\x17\n\x0f\x63olumn_spec_ids\x18\x02 \x03(\t\x12&\n\x06values\x18\x03 \x03(\x0b\x32\x16.google.protobuf.Value"\xfe\x01\n\x0e\x45xamplePayload\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32".google.cloud.automl.v1beta1.ImageH\x00\x12@\n\x0ctext_snippet\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetH\x00\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DocumentH\x00\x12/\n\x03row\x18\x03 \x01(\x0b\x32 .google.cloud.automl.v1beta1.RowH\x00\x42\t\n\x07payloadB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, - google_dot_protobuf_dot_any__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT = _descriptor.EnumDescriptor( - name="DocumentDimensionUnit", - full_name="google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="DOCUMENT_DIMENSION_UNIT_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="INCH", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CENTIMETER", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="POINT", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=740, - serialized_end=841, -) -_sym_db.RegisterEnumDescriptor(_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT) - -_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE = _descriptor.EnumDescriptor( - name="TextSegmentType", - full_name="google.cloud.automl.v1beta1.Document.Layout.TextSegmentType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TEXT_SEGMENT_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TOKEN", - index=1, - number=1, - serialized_options=None, - type=None, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PARAGRAPH", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FORM_FIELD", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FORM_FIELD_NAME", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FORM_FIELD_CONTENTS", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TABLE", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TABLE_HEADER", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TABLE_ROW", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TABLE_CELL", - index=9, - number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1405, - serialized_end=1605, -) -_sym_db.RegisterEnumDescriptor(_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE) - - -_IMAGE = _descriptor.Descriptor( - name="Image", - full_name="google.cloud.automl.v1beta1.Image", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="image_bytes", - full_name="google.cloud.automl.v1beta1.Image.image_bytes", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="input_config", - full_name="google.cloud.automl.v1beta1.Image.input_config", - index=1, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="thumbnail_uri", - full_name="google.cloud.automl.v1beta1.Image.thumbnail_uri", - index=2, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="data", - full_name="google.cloud.automl.v1beta1.Image.data", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=400, - serialized_end=527, -) - - 
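The `_IMAGE` descriptor closed above places `image_bytes` and `input_config` in a single `data` oneof, so the two are mutually exclusive, while `thumbnail_uri` (output only) sits outside the oneof. A sketch of the resulting runtime behavior, assuming the module path this diff deletes and placeholder bytes:

```py
from google.cloud.automl_v1beta1.proto import data_items_pb2

image = data_items_pb2.Image(image_bytes=b"<image bytes>")  # placeholder
assert image.WhichOneof("data") == "image_bytes"

# Marking input_config present switches the oneof and clears image_bytes.
image.input_config.SetInParent()
assert image.WhichOneof("data") == "input_config"
assert not image.image_bytes
```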
-_TEXTSNIPPET = _descriptor.Descriptor( - name="TextSnippet", - full_name="google.cloud.automl.v1beta1.TextSnippet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="content", - full_name="google.cloud.automl.v1beta1.TextSnippet.content", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mime_type", - full_name="google.cloud.automl.v1beta1.TextSnippet.mime_type", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="content_uri", - full_name="google.cloud.automl.v1beta1.TextSnippet.content_uri", - index=2, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=529, - serialized_end=599, -) - - -_DOCUMENTDIMENSIONS = _descriptor.Descriptor( - name="DocumentDimensions", - full_name="google.cloud.automl.v1beta1.DocumentDimensions", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="unit", - full_name="google.cloud.automl.v1beta1.DocumentDimensions.unit", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="width", - full_name="google.cloud.automl.v1beta1.DocumentDimensions.width", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="height", - full_name="google.cloud.automl.v1beta1.DocumentDimensions.height", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - 
serialized_start=602, - serialized_end=841, -) - - -_DOCUMENT_LAYOUT = _descriptor.Descriptor( - name="Layout", - full_name="google.cloud.automl.v1beta1.Document.Layout", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="text_segment", - full_name="google.cloud.automl.v1beta1.Document.Layout.text_segment", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_number", - full_name="google.cloud.automl.v1beta1.Document.Layout.page_number", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bounding_poly", - full_name="google.cloud.automl.v1beta1.Document.Layout.bounding_poly", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_segment_type", - full_name="google.cloud.automl.v1beta1.Document.Layout.text_segment_type", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1154, - serialized_end=1605, -) - -_DOCUMENT = _descriptor.Descriptor( - name="Document", - full_name="google.cloud.automl.v1beta1.Document", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="input_config", - full_name="google.cloud.automl.v1beta1.Document.input_config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="document_text", - full_name="google.cloud.automl.v1beta1.Document.document_text", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="layout", - full_name="google.cloud.automl.v1beta1.Document.layout", - index=2, - number=3, - type=11, - 
cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="document_dimensions", - full_name="google.cloud.automl.v1beta1.Document.document_dimensions", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_count", - full_name="google.cloud.automl.v1beta1.Document.page_count", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_DOCUMENT_LAYOUT,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=844, - serialized_end=1605, -) - - -_ROW = _descriptor.Descriptor( - name="Row", - full_name="google.cloud.automl.v1beta1.Row", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="column_spec_ids", - full_name="google.cloud.automl.v1beta1.Row.column_spec_ids", - index=0, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.cloud.automl.v1beta1.Row.values", - index=1, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1607, - serialized_end=1677, -) - - -_EXAMPLEPAYLOAD = _descriptor.Descriptor( - name="ExamplePayload", - full_name="google.cloud.automl.v1beta1.ExamplePayload", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="image", - full_name="google.cloud.automl.v1beta1.ExamplePayload.image", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_snippet", - full_name="google.cloud.automl.v1beta1.ExamplePayload.text_snippet", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - 
default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="document", - full_name="google.cloud.automl.v1beta1.ExamplePayload.document", - index=2, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row", - full_name="google.cloud.automl.v1beta1.ExamplePayload.row", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="payload", - full_name="google.cloud.automl.v1beta1.ExamplePayload.payload", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1680, - serialized_end=1934, -) - -_IMAGE.fields_by_name[ - "input_config" -].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._INPUTCONFIG -_IMAGE.oneofs_by_name["data"].fields.append(_IMAGE.fields_by_name["image_bytes"]) -_IMAGE.fields_by_name["image_bytes"].containing_oneof = _IMAGE.oneofs_by_name["data"] -_IMAGE.oneofs_by_name["data"].fields.append(_IMAGE.fields_by_name["input_config"]) -_IMAGE.fields_by_name["input_config"].containing_oneof = _IMAGE.oneofs_by_name["data"] -_DOCUMENTDIMENSIONS.fields_by_name[ - "unit" -].enum_type = _DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT -_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT.containing_type = _DOCUMENTDIMENSIONS -_DOCUMENT_LAYOUT.fields_by_name[ - "text_segment" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2._TEXTSEGMENT -) -_DOCUMENT_LAYOUT.fields_by_name[ - "bounding_poly" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY -) -_DOCUMENT_LAYOUT.fields_by_name[ - "text_segment_type" -].enum_type = _DOCUMENT_LAYOUT_TEXTSEGMENTTYPE -_DOCUMENT_LAYOUT.containing_type = _DOCUMENT -_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE.containing_type = _DOCUMENT_LAYOUT -_DOCUMENT.fields_by_name[ - "input_config" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._DOCUMENTINPUTCONFIG -) -_DOCUMENT.fields_by_name["document_text"].message_type = _TEXTSNIPPET -_DOCUMENT.fields_by_name["layout"].message_type = _DOCUMENT_LAYOUT -_DOCUMENT.fields_by_name["document_dimensions"].message_type = _DOCUMENTDIMENSIONS -_ROW.fields_by_name["values"].message_type = google_dot_protobuf_dot_struct__pb2._VALUE -_EXAMPLEPAYLOAD.fields_by_name["image"].message_type = _IMAGE -_EXAMPLEPAYLOAD.fields_by_name["text_snippet"].message_type = _TEXTSNIPPET -_EXAMPLEPAYLOAD.fields_by_name["document"].message_type = _DOCUMENT -_EXAMPLEPAYLOAD.fields_by_name["row"].message_type = _ROW -_EXAMPLEPAYLOAD.oneofs_by_name["payload"].fields.append( - _EXAMPLEPAYLOAD.fields_by_name["image"] -) 
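The statements above (continuing just below for `text_snippet`, `document`, and `row`) manually register each field with the `payload` oneof, which is what gives `ExamplePayload` its last-write-wins behavior at runtime. A sketch under the same assumptions as the earlier examples:

```py
from google.cloud.automl_v1beta1.proto import data_items_pb2

payload = data_items_pb2.ExamplePayload()
payload.text_snippet.content = "A dried-up river bed"
assert payload.WhichOneof("payload") == "text_snippet"

# Setting another member of the oneof clears the previous one.
payload.row.column_spec_ids.append("3122342")  # illustrative ID
assert payload.WhichOneof("payload") == "row"
assert not payload.HasField("text_snippet")
```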
-_EXAMPLEPAYLOAD.fields_by_name[ - "image" -].containing_oneof = _EXAMPLEPAYLOAD.oneofs_by_name["payload"] -_EXAMPLEPAYLOAD.oneofs_by_name["payload"].fields.append( - _EXAMPLEPAYLOAD.fields_by_name["text_snippet"] -) -_EXAMPLEPAYLOAD.fields_by_name[ - "text_snippet" -].containing_oneof = _EXAMPLEPAYLOAD.oneofs_by_name["payload"] -_EXAMPLEPAYLOAD.oneofs_by_name["payload"].fields.append( - _EXAMPLEPAYLOAD.fields_by_name["document"] -) -_EXAMPLEPAYLOAD.fields_by_name[ - "document" -].containing_oneof = _EXAMPLEPAYLOAD.oneofs_by_name["payload"] -_EXAMPLEPAYLOAD.oneofs_by_name["payload"].fields.append( - _EXAMPLEPAYLOAD.fields_by_name["row"] -) -_EXAMPLEPAYLOAD.fields_by_name["row"].containing_oneof = _EXAMPLEPAYLOAD.oneofs_by_name[ - "payload" -] -DESCRIPTOR.message_types_by_name["Image"] = _IMAGE -DESCRIPTOR.message_types_by_name["TextSnippet"] = _TEXTSNIPPET -DESCRIPTOR.message_types_by_name["DocumentDimensions"] = _DOCUMENTDIMENSIONS -DESCRIPTOR.message_types_by_name["Document"] = _DOCUMENT -DESCRIPTOR.message_types_by_name["Row"] = _ROW -DESCRIPTOR.message_types_by_name["ExamplePayload"] = _EXAMPLEPAYLOAD -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Image = _reflection.GeneratedProtocolMessageType( - "Image", - (_message.Message,), - { - "DESCRIPTOR": _IMAGE, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """A representation of an image. Only images up to 30MB in size are - supported. - - Attributes: - data: - Input only. The data representing the image. For Predict calls - [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] - must be set, as other options are not currently supported by - prediction API. You can read the contents of an uploaded image - by using the - [content_uri][google.cloud.automl.v1beta1.Image.content_uri] - field. - image_bytes: - Image content represented as a stream of bytes. Note: As with - all ``bytes`` fields, protobuffers use a pure binary - representation, whereas JSON representations use base64. - input_config: - An input config specifying the content of the image. - thumbnail_uri: - Output only. HTTP URI to the thumbnail image. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Image) - }, -) -_sym_db.RegisterMessage(Image) - -TextSnippet = _reflection.GeneratedProtocolMessageType( - "TextSnippet", - (_message.Message,), - { - "DESCRIPTOR": _TEXTSNIPPET, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """A representation of a text snippet. - - Attributes: - content: - Required. The content of the text snippet as a string. Up to - 250000 characters long. - mime_type: - Optional. The format of - [content][google.cloud.automl.v1beta1.TextSnippet.content]. - Currently the only two allowed values are “text/html” and - “text/plain”. If left blank, the format is automatically - determined from the type of the uploaded - [content][google.cloud.automl.v1beta1.TextSnippet.content]. - content_uri: - Output only. HTTP URI where you can download the content. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSnippet) - }, -) -_sym_db.RegisterMessage(TextSnippet) - -DocumentDimensions = _reflection.GeneratedProtocolMessageType( - "DocumentDimensions", - (_message.Message,), - { - "DESCRIPTOR": _DOCUMENTDIMENSIONS, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """Message that describes dimension of a document. - - Attributes: - unit: - Unit of the dimension. 
- width: - Width value of the document, works together with the unit. - height: - Height value of the document, works together with the unit. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DocumentDimensions) - }, -) -_sym_db.RegisterMessage(DocumentDimensions) - -Document = _reflection.GeneratedProtocolMessageType( - "Document", - (_message.Message,), - { - "Layout": _reflection.GeneratedProtocolMessageType( - "Layout", - (_message.Message,), - { - "DESCRIPTOR": _DOCUMENT_LAYOUT, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """Describes the layout information of a [text_segment][google.cloud.auto - ml.v1beta1.Document.Layout.text_segment] in the document. - - Attributes: - text_segment: - Text Segment that represents a segment in [document_text][goog - le.cloud.automl.v1beta1.Document.document_text]. - page_number: - Page number of the [text_segment][google.cloud.automl.v1beta1. - Document.Layout.text_segment] in the original document, starts - from 1. - bounding_poly: - The position of the [text_segment][google.cloud.automl.v1beta1 - .Document.Layout.text_segment] in the page. Contains exactly 4 - [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly - .normalized_vertices] and they are connected by edges in the - order provided, which will represent a rectangle parallel to - the frame. The [NormalizedVertex-s][google.cloud.automl.v1beta - 1.NormalizedVertex] are relative to the page. Coordinates are - based on top-left as point (0,0). - text_segment_type: - The type of the [text_segment][google.cloud.automl.v1beta1.Doc - ument.Layout.text_segment] in document. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Document.Layout) - }, - ), - "DESCRIPTOR": _DOCUMENT, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """A structured text document e.g. a PDF. - - Attributes: - input_config: - An input config specifying the content of the document. - document_text: - The plain text version of this document. - layout: - Describes the layout of the document. Sorted by - [page_number][]. - document_dimensions: - The dimensions of the page in the document. - page_count: - Number of pages in the document. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Document) - }, -) -_sym_db.RegisterMessage(Document) -_sym_db.RegisterMessage(Document.Layout) - -Row = _reflection.GeneratedProtocolMessageType( - "Row", - (_message.Message,), - { - "DESCRIPTOR": _ROW, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """A representation of a row in a relational table. - - Attributes: - column_spec_ids: - The resource IDs of the column specs describing the columns of - the row. If set must contain, but possibly in a different - order, all input feature [column_spec_ids][google.cloud.autom - l.v1beta1.TablesModelMetadata.input_feature_column_specs] of - the Model this row is being passed to. Note: The below - ``values`` field must match order of this field, if this field - is set. - values: - Required. The values of the row cells, given in the same order - as the column_spec_ids, or, if not set, then in the same order - as input feature [column_specs][google.cloud.automl.v1beta1.T - ablesModelMetadata.input_feature_column_specs] of the Model - this row is being passed to. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Row) - }, -) -_sym_db.RegisterMessage(Row) - -ExamplePayload = _reflection.GeneratedProtocolMessageType( - "ExamplePayload", - (_message.Message,), - { - "DESCRIPTOR": _EXAMPLEPAYLOAD, - "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """Example data used for training or prediction. - - Attributes: - payload: - Required. Input only. The example data. - image: - Example image. - text_snippet: - Example text. - document: - Example document. - row: - Example relational table row. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExamplePayload) - }, -) -_sym_db.RegisterMessage(ExamplePayload) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/data_items_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/data_items_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/data_stats.proto b/google/cloud/automl_v1beta1/proto/data_stats.proto deleted file mode 100644 index c13a5d45..00000000 --- a/google/cloud/automl_v1beta1/proto/data_stats.proto +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// The data statistics of a series of values that share the same DataType. -message DataStats { - // The data statistics specific to a DataType. - oneof stats { - // The statistics for FLOAT64 DataType. - Float64Stats float64_stats = 3; - - // The statistics for STRING DataType. - StringStats string_stats = 4; - - // The statistics for TIMESTAMP DataType. - TimestampStats timestamp_stats = 5; - - // The statistics for ARRAY DataType. - ArrayStats array_stats = 6; - - // The statistics for STRUCT DataType. - StructStats struct_stats = 7; - - // The statistics for CATEGORY DataType. - CategoryStats category_stats = 8; - } - - // The number of distinct values. - int64 distinct_value_count = 1; - - // The number of values that are null. - int64 null_value_count = 2; - - // The number of values that are valid. - int64 valid_value_count = 9; -} - -// The data statistics of a series of FLOAT64 values. -message Float64Stats { - // A bucket of a histogram. - message HistogramBucket { - // The minimum value of the bucket, inclusive. 
-    double min = 1;
-
-    // The maximum value of the bucket, exclusive unless max = `"Infinity"`, in
-    // which case it's inclusive.
-    double max = 2;
-
-    // The number of data values that are in the bucket, i.e. are between
-    // min and max values.
-    int64 count = 3;
-  }
-
-  // The mean of the series.
-  double mean = 1;
-
-  // The standard deviation of the series.
-  double standard_deviation = 2;
-
-  // Ordered from 0 to k k-quantile values of the data series of n values.
-  // The value at index i is, approximately, the i*n/k-th smallest value in the
-  // series; for i = 0 and i = k these are, respectively, the min and max
-  // values.
-  repeated double quantiles = 3;
-
-  // Histogram buckets of the data series. Sorted by the min value of the
-  // bucket, ascendingly, and the number of the buckets is dynamically
-  // generated. The buckets are non-overlapping and completely cover whole
-  // FLOAT64 range with min of first bucket being `"-Infinity"`, and max of
-  // the last one being `"Infinity"`.
-  repeated HistogramBucket histogram_buckets = 4;
-}
-
-// The data statistics of a series of STRING values.
-message StringStats {
-  // The statistics of a unigram.
-  message UnigramStats {
-    // The unigram.
-    string value = 1;
-
-    // The number of occurrences of this unigram in the series.
-    int64 count = 2;
-  }
-
-  // The statistics of the top 20 unigrams, ordered by
-  // [count][google.cloud.automl.v1beta1.StringStats.UnigramStats.count].
-  repeated UnigramStats top_unigram_stats = 1;
-}
-
-// The data statistics of a series of TIMESTAMP values.
-message TimestampStats {
-  // Stats split by a granularity defined in context.
-  message GranularStats {
-    // A map from granularity key to example count for that key.
-    // E.g. for hour_of_day `13` means 1pm, or for month_of_year `5` means May.
-    map<int32, int64> buckets = 1;
-  }
-
-  // The string key is the pre-defined granularity. Currently supported:
-  // hour_of_day, day_of_week, month_of_year.
-  // Granularities finer than the granularity of timestamp data are not
-  // populated (e.g. if timestamps are at day granularity, then hour_of_day
-  // is not populated).
-  map<string, GranularStats> granular_stats = 1;
-}
-
-// The data statistics of a series of ARRAY values.
-message ArrayStats {
-  // Stats of all the values of all arrays, as if they were a single long
-  // series of data. The type depends on the element type of the array.
-  DataStats member_stats = 2;
-}
-
-// The data statistics of a series of STRUCT values.
-message StructStats {
-  // Map from a field name of the struct to data stats aggregated over series
-  // of all data in that field across all the structs.
-  map<string, DataStats> field_stats = 1;
-}
-
-// The data statistics of a series of CATEGORY values.
-message CategoryStats {
-  // The statistics of a single CATEGORY value.
-  message SingleCategoryStats {
-    // The CATEGORY value.
-    string value = 1;
-
-    // The number of occurrences of this value in the series.
-    int64 count = 2;
-  }
-
-  // The statistics of the top 20 CATEGORY values, ordered by
-  //
-  // [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count].
-  repeated SingleCategoryStats top_category_stats = 1;
-}
-
-// Correlation statistics between two series of DataType values. The series
-// may have differing DataType-s, but within a single series the DataType must
-// be the same.
-message CorrelationStats {
-  // The correlation value using the Cramer's V measure.
- double cramers_v = 1; -} diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py deleted file mode 100644 index dc31756b..00000000 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py +++ /dev/null @@ -1,1361 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/data_stats.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/data_stats.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_stats.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfd\x03\n\tDataStats\x12\x42\n\rfloat64_stats\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.Float64StatsH\x00\x12@\n\x0cstring_stats\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StringStatsH\x00\x12\x46\n\x0ftimestamp_stats\x18\x05 \x01(\x0b\x32+.google.cloud.automl.v1beta1.TimestampStatsH\x00\x12>\n\x0b\x61rray_stats\x18\x06 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ArrayStatsH\x00\x12@\n\x0cstruct_stats\x18\x07 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StructStatsH\x00\x12\x44\n\x0e\x63\x61tegory_stats\x18\x08 \x01(\x0b\x32*.google.cloud.automl.v1beta1.CategoryStatsH\x00\x12\x1c\n\x14\x64istinct_value_count\x18\x01 \x01(\x03\x12\x18\n\x10null_value_count\x18\x02 \x01(\x03\x12\x19\n\x11valid_value_count\x18\t \x01(\x03\x42\x07\n\x05stats"\xdd\x01\n\x0c\x46loat64Stats\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x1a\n\x12standard_deviation\x18\x02 \x01(\x01\x12\x11\n\tquantiles\x18\x03 \x03(\x01\x12T\n\x11histogram_buckets\x18\x04 \x03(\x0b\x32\x39.google.cloud.automl.v1beta1.Float64Stats.HistogramBucket\x1a:\n\x0fHistogramBucket\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\r\n\x05\x63ount\x18\x03 \x01(\x03"\x8d\x01\n\x0bStringStats\x12P\n\x11top_unigram_stats\x18\x01 \x03(\x0b\x32\x35.google.cloud.automl.v1beta1.StringStats.UnigramStats\x1a,\n\x0cUnigramStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"\xf4\x02\n\x0eTimestampStats\x12V\n\x0egranular_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry\x1a\x98\x01\n\rGranularStats\x12W\n\x07\x62uckets\x18\x01 \x03(\x0b\x32\x46.google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry\x1a.\n\x0c\x42ucketsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1ao\n\x12GranularStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12H\n\x05value\x18\x02 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TimestampStats.GranularStats:\x02\x38\x01"J\n\nArrayStats\x12<\n\x0cmember_stats\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats"\xb7\x01\n\x0bStructStats\x12M\n\x0b\x66ield_stats\x18\x01 
\x03(\x0b\x32\x38.google.cloud.automl.v1beta1.StructStats.FieldStatsEntry\x1aY\n\x0f\x46ieldStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats:\x02\x38\x01"\xa0\x01\n\rCategoryStats\x12Z\n\x12top_category_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats\x1a\x33\n\x13SingleCategoryStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"%\n\x10\x43orrelationStats\x12\x11\n\tcramers_v\x18\x01 \x01(\x01\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - - -_DATASTATS = _descriptor.Descriptor( - name="DataStats", - full_name="google.cloud.automl.v1beta1.DataStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="float64_stats", - full_name="google.cloud.automl.v1beta1.DataStats.float64_stats", - index=0, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="string_stats", - full_name="google.cloud.automl.v1beta1.DataStats.string_stats", - index=1, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_stats", - full_name="google.cloud.automl.v1beta1.DataStats.timestamp_stats", - index=2, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="array_stats", - full_name="google.cloud.automl.v1beta1.DataStats.array_stats", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="struct_stats", - full_name="google.cloud.automl.v1beta1.DataStats.struct_stats", - index=4, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="category_stats", - full_name="google.cloud.automl.v1beta1.DataStats.category_stats", - index=5, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="distinct_value_count", - full_name="google.cloud.automl.v1beta1.DataStats.distinct_value_count", - index=6, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="null_value_count", - full_name="google.cloud.automl.v1beta1.DataStats.null_value_count", - index=7, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="valid_value_count", - full_name="google.cloud.automl.v1beta1.DataStats.valid_value_count", - index=8, - number=9, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="stats", - full_name="google.cloud.automl.v1beta1.DataStats.stats", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=114, - serialized_end=623, -) - - -_FLOAT64STATS_HISTOGRAMBUCKET = _descriptor.Descriptor( - name="HistogramBucket", - full_name="google.cloud.automl.v1beta1.Float64Stats.HistogramBucket", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="min", - full_name="google.cloud.automl.v1beta1.Float64Stats.HistogramBucket.min", - index=0, - number=1, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max", - full_name="google.cloud.automl.v1beta1.Float64Stats.HistogramBucket.max", - index=1, - number=2, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="count", - full_name="google.cloud.automl.v1beta1.Float64Stats.HistogramBucket.count", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=789, - serialized_end=847, -) - -_FLOAT64STATS = _descriptor.Descriptor( - name="Float64Stats", - full_name="google.cloud.automl.v1beta1.Float64Stats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="mean", - full_name="google.cloud.automl.v1beta1.Float64Stats.mean", - index=0, - number=1, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="standard_deviation", - full_name="google.cloud.automl.v1beta1.Float64Stats.standard_deviation", - index=1, - number=2, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="quantiles", - full_name="google.cloud.automl.v1beta1.Float64Stats.quantiles", - index=2, - number=3, - type=1, - cpp_type=5, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="histogram_buckets", - full_name="google.cloud.automl.v1beta1.Float64Stats.histogram_buckets", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_FLOAT64STATS_HISTOGRAMBUCKET,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=626, - serialized_end=847, -) - - -_STRINGSTATS_UNIGRAMSTATS = _descriptor.Descriptor( - name="UnigramStats", - full_name="google.cloud.automl.v1beta1.StringStats.UnigramStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.StringStats.UnigramStats.value", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="count", - full_name="google.cloud.automl.v1beta1.StringStats.UnigramStats.count", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - 
extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=947, - serialized_end=991, -) - -_STRINGSTATS = _descriptor.Descriptor( - name="StringStats", - full_name="google.cloud.automl.v1beta1.StringStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="top_unigram_stats", - full_name="google.cloud.automl.v1beta1.StringStats.top_unigram_stats", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_STRINGSTATS_UNIGRAMSTATS,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=850, - serialized_end=991, -) - - -_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY = _descriptor.Descriptor( - name="BucketsEntry", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry.key", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry.value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1207, - serialized_end=1253, -) - -_TIMESTAMPSTATS_GRANULARSTATS = _descriptor.Descriptor( - name="GranularStats", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="buckets", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStats.buckets", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1101, - serialized_end=1253, -) - -_TIMESTAMPSTATS_GRANULARSTATSENTRY = 
_descriptor.Descriptor( - name="GranularStatsEntry", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1255, - serialized_end=1366, -) - -_TIMESTAMPSTATS = _descriptor.Descriptor( - name="TimestampStats", - full_name="google.cloud.automl.v1beta1.TimestampStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="granular_stats", - full_name="google.cloud.automl.v1beta1.TimestampStats.granular_stats", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_TIMESTAMPSTATS_GRANULARSTATS, _TIMESTAMPSTATS_GRANULARSTATSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=994, - serialized_end=1366, -) - - -_ARRAYSTATS = _descriptor.Descriptor( - name="ArrayStats", - full_name="google.cloud.automl.v1beta1.ArrayStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="member_stats", - full_name="google.cloud.automl.v1beta1.ArrayStats.member_stats", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1368, - serialized_end=1442, -) - - -_STRUCTSTATS_FIELDSTATSENTRY = _descriptor.Descriptor( - name="FieldStatsEntry", - full_name="google.cloud.automl.v1beta1.StructStats.FieldStatsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - 
full_name="google.cloud.automl.v1beta1.StructStats.FieldStatsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.StructStats.FieldStatsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1539, - serialized_end=1628, -) - -_STRUCTSTATS = _descriptor.Descriptor( - name="StructStats", - full_name="google.cloud.automl.v1beta1.StructStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="field_stats", - full_name="google.cloud.automl.v1beta1.StructStats.field_stats", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_STRUCTSTATS_FIELDSTATSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1445, - serialized_end=1628, -) - - -_CATEGORYSTATS_SINGLECATEGORYSTATS = _descriptor.Descriptor( - name="SingleCategoryStats", - full_name="google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.value", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="count", - full_name="google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1740, - serialized_end=1791, -) - -_CATEGORYSTATS = _descriptor.Descriptor( - name="CategoryStats", - full_name="google.cloud.automl.v1beta1.CategoryStats", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="top_category_stats", - full_name="google.cloud.automl.v1beta1.CategoryStats.top_category_stats", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CATEGORYSTATS_SINGLECATEGORYSTATS,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1631, - serialized_end=1791, -) - - -_CORRELATIONSTATS = _descriptor.Descriptor( - name="CorrelationStats", - full_name="google.cloud.automl.v1beta1.CorrelationStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cramers_v", - full_name="google.cloud.automl.v1beta1.CorrelationStats.cramers_v", - index=0, - number=1, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1793, - serialized_end=1830, -) - -_DATASTATS.fields_by_name["float64_stats"].message_type = _FLOAT64STATS -_DATASTATS.fields_by_name["string_stats"].message_type = _STRINGSTATS -_DATASTATS.fields_by_name["timestamp_stats"].message_type = _TIMESTAMPSTATS -_DATASTATS.fields_by_name["array_stats"].message_type = _ARRAYSTATS -_DATASTATS.fields_by_name["struct_stats"].message_type = _STRUCTSTATS -_DATASTATS.fields_by_name["category_stats"].message_type = _CATEGORYSTATS -_DATASTATS.oneofs_by_name["stats"].fields.append( - _DATASTATS.fields_by_name["float64_stats"] -) -_DATASTATS.fields_by_name["float64_stats"].containing_oneof = _DATASTATS.oneofs_by_name[ - "stats" -] -_DATASTATS.oneofs_by_name["stats"].fields.append( - _DATASTATS.fields_by_name["string_stats"] -) -_DATASTATS.fields_by_name["string_stats"].containing_oneof = _DATASTATS.oneofs_by_name[ - "stats" -] -_DATASTATS.oneofs_by_name["stats"].fields.append( - _DATASTATS.fields_by_name["timestamp_stats"] -) -_DATASTATS.fields_by_name[ - "timestamp_stats" -].containing_oneof = _DATASTATS.oneofs_by_name["stats"] -_DATASTATS.oneofs_by_name["stats"].fields.append( - _DATASTATS.fields_by_name["array_stats"] -) -_DATASTATS.fields_by_name["array_stats"].containing_oneof = _DATASTATS.oneofs_by_name[ - "stats" -] -_DATASTATS.oneofs_by_name["stats"].fields.append( - _DATASTATS.fields_by_name["struct_stats"] -) -_DATASTATS.fields_by_name["struct_stats"].containing_oneof = _DATASTATS.oneofs_by_name[ - "stats" -] -_DATASTATS.oneofs_by_name["stats"].fields.append( - _DATASTATS.fields_by_name["category_stats"] -) -_DATASTATS.fields_by_name[ - "category_stats" -].containing_oneof = _DATASTATS.oneofs_by_name["stats"] -_FLOAT64STATS_HISTOGRAMBUCKET.containing_type = _FLOAT64STATS -_FLOAT64STATS.fields_by_name[ - "histogram_buckets" -].message_type = _FLOAT64STATS_HISTOGRAMBUCKET -_STRINGSTATS_UNIGRAMSTATS.containing_type = 
_STRINGSTATS -_STRINGSTATS.fields_by_name[ - "top_unigram_stats" -].message_type = _STRINGSTATS_UNIGRAMSTATS -_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY.containing_type = ( - _TIMESTAMPSTATS_GRANULARSTATS -) -_TIMESTAMPSTATS_GRANULARSTATS.fields_by_name[ - "buckets" -].message_type = _TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY -_TIMESTAMPSTATS_GRANULARSTATS.containing_type = _TIMESTAMPSTATS -_TIMESTAMPSTATS_GRANULARSTATSENTRY.fields_by_name[ - "value" -].message_type = _TIMESTAMPSTATS_GRANULARSTATS -_TIMESTAMPSTATS_GRANULARSTATSENTRY.containing_type = _TIMESTAMPSTATS -_TIMESTAMPSTATS.fields_by_name[ - "granular_stats" -].message_type = _TIMESTAMPSTATS_GRANULARSTATSENTRY -_ARRAYSTATS.fields_by_name["member_stats"].message_type = _DATASTATS -_STRUCTSTATS_FIELDSTATSENTRY.fields_by_name["value"].message_type = _DATASTATS -_STRUCTSTATS_FIELDSTATSENTRY.containing_type = _STRUCTSTATS -_STRUCTSTATS.fields_by_name["field_stats"].message_type = _STRUCTSTATS_FIELDSTATSENTRY -_CATEGORYSTATS_SINGLECATEGORYSTATS.containing_type = _CATEGORYSTATS -_CATEGORYSTATS.fields_by_name[ - "top_category_stats" -].message_type = _CATEGORYSTATS_SINGLECATEGORYSTATS -DESCRIPTOR.message_types_by_name["DataStats"] = _DATASTATS -DESCRIPTOR.message_types_by_name["Float64Stats"] = _FLOAT64STATS -DESCRIPTOR.message_types_by_name["StringStats"] = _STRINGSTATS -DESCRIPTOR.message_types_by_name["TimestampStats"] = _TIMESTAMPSTATS -DESCRIPTOR.message_types_by_name["ArrayStats"] = _ARRAYSTATS -DESCRIPTOR.message_types_by_name["StructStats"] = _STRUCTSTATS -DESCRIPTOR.message_types_by_name["CategoryStats"] = _CATEGORYSTATS -DESCRIPTOR.message_types_by_name["CorrelationStats"] = _CORRELATIONSTATS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -DataStats = _reflection.GeneratedProtocolMessageType( - "DataStats", - (_message.Message,), - { - "DESCRIPTOR": _DATASTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of values that share the same - DataType. - - Attributes: - stats: - The data statistics specific to a DataType. - float64_stats: - The statistics for FLOAT64 DataType. - string_stats: - The statistics for STRING DataType. - timestamp_stats: - The statistics for TIMESTAMP DataType. - array_stats: - The statistics for ARRAY DataType. - struct_stats: - The statistics for STRUCT DataType. - category_stats: - The statistics for CATEGORY DataType. - distinct_value_count: - The number of distinct values. - null_value_count: - The number of values that are null. - valid_value_count: - The number of values that are valid. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DataStats) - }, -) -_sym_db.RegisterMessage(DataStats) - -Float64Stats = _reflection.GeneratedProtocolMessageType( - "Float64Stats", - (_message.Message,), - { - "HistogramBucket": _reflection.GeneratedProtocolMessageType( - "HistogramBucket", - (_message.Message,), - { - "DESCRIPTOR": _FLOAT64STATS_HISTOGRAMBUCKET, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """A bucket of a histogram. - - Attributes: - min: - The minimum value of the bucket, inclusive. - max: - The maximum value of the bucket, exclusive unless max = - ``"Infinity"``, in which case it’s inclusive. - count: - The number of data values that are in the bucket, i.e. are - between min and max values. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Float64Stats.HistogramBucket) - }, - ), - "DESCRIPTOR": _FLOAT64STATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of FLOAT64 values. - - Attributes: - mean: - The mean of the series. - standard_deviation: - The standard deviation of the series. - quantiles: - Ordered from 0 to k k-quantile values of the data series of n - values. The value at index i is, approximately, the i*n/k-th - smallest value in the series; for i = 0 and i = k these are, - respectively, the min and max values. - histogram_buckets: - Histogram buckets of the data series. Sorted by the min value - of the bucket, ascendingly, and the number of the buckets is - dynamically generated. The buckets are non-overlapping and - completely cover whole FLOAT64 range with min of first bucket - being ``"-Infinity"``, and max of the last one being - ``"Infinity"``. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Float64Stats) - }, -) -_sym_db.RegisterMessage(Float64Stats) -_sym_db.RegisterMessage(Float64Stats.HistogramBucket) - -StringStats = _reflection.GeneratedProtocolMessageType( - "StringStats", - (_message.Message,), - { - "UnigramStats": _reflection.GeneratedProtocolMessageType( - "UnigramStats", - (_message.Message,), - { - "DESCRIPTOR": _STRINGSTATS_UNIGRAMSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The statistics of a unigram. - - Attributes: - value: - The unigram. - count: - The number of occurrences of this unigram in the series. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StringStats.UnigramStats) - }, - ), - "DESCRIPTOR": _STRINGSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of STRING values. - - Attributes: - top_unigram_stats: - The statistics of the top 20 unigrams, ordered by [count][goog - le.cloud.automl.v1beta1.StringStats.UnigramStats.count]. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StringStats) - }, -) -_sym_db.RegisterMessage(StringStats) -_sym_db.RegisterMessage(StringStats.UnigramStats) - -TimestampStats = _reflection.GeneratedProtocolMessageType( - "TimestampStats", - (_message.Message,), - { - "GranularStats": _reflection.GeneratedProtocolMessageType( - "GranularStats", - (_message.Message,), - { - "BucketsEntry": _reflection.GeneratedProtocolMessageType( - "BucketsEntry", - (_message.Message,), - { - "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry) - }, - ), - "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """Stats split by a defined in context granularity. - - Attributes: - buckets: - A map from granularity key to example count for that key. E.g. - for hour_of_day ``13`` means 1pm, or for month_of_year ``5`` - means May). 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStats) - }, - ), - "GranularStatsEntry": _reflection.GeneratedProtocolMessageType( - "GranularStatsEntry", - (_message.Message,), - { - "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry) - }, - ), - "DESCRIPTOR": _TIMESTAMPSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of TIMESTAMP values. - - Attributes: - granular_stats: - The string key is the pre-defined granularity. Currently - supported: hour_of_day, day_of_week, month_of_year. - Granularities finer that the granularity of timestamp data are - not populated (e.g. if timestamps are at day granularity, then - hour_of_day is not populated). - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats) - }, -) -_sym_db.RegisterMessage(TimestampStats) -_sym_db.RegisterMessage(TimestampStats.GranularStats) -_sym_db.RegisterMessage(TimestampStats.GranularStats.BucketsEntry) -_sym_db.RegisterMessage(TimestampStats.GranularStatsEntry) - -ArrayStats = _reflection.GeneratedProtocolMessageType( - "ArrayStats", - (_message.Message,), - { - "DESCRIPTOR": _ARRAYSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of ARRAY values. - - Attributes: - member_stats: - Stats of all the values of all arrays, as if they were a - single long series of data. The type depends on the element - type of the array. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ArrayStats) - }, -) -_sym_db.RegisterMessage(ArrayStats) - -StructStats = _reflection.GeneratedProtocolMessageType( - "StructStats", - (_message.Message,), - { - "FieldStatsEntry": _reflection.GeneratedProtocolMessageType( - "FieldStatsEntry", - (_message.Message,), - { - "DESCRIPTOR": _STRUCTSTATS_FIELDSTATSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructStats.FieldStatsEntry) - }, - ), - "DESCRIPTOR": _STRUCTSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of STRUCT values. - - Attributes: - field_stats: - Map from a field name of the struct to data stats aggregated - over series of all data in that field across all the structs. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructStats) - }, -) -_sym_db.RegisterMessage(StructStats) -_sym_db.RegisterMessage(StructStats.FieldStatsEntry) - -CategoryStats = _reflection.GeneratedProtocolMessageType( - "CategoryStats", - (_message.Message,), - { - "SingleCategoryStats": _reflection.GeneratedProtocolMessageType( - "SingleCategoryStats", - (_message.Message,), - { - "DESCRIPTOR": _CATEGORYSTATS_SINGLECATEGORYSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The statistics of a single CATEGORY value. - - Attributes: - value: - The CATEGORY value. - count: - The number of occurrences of this value in the series. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats) - }, - ), - "DESCRIPTOR": _CATEGORYSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of CATEGORY values. - - Attributes: - top_category_stats: - The statistics of the top 20 CATEGORY values, ordered by [cou - nt][google.cloud.automl.v1beta1.CategoryStats.SingleCategorySt - ats.count]. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CategoryStats) - }, -) -_sym_db.RegisterMessage(CategoryStats) -_sym_db.RegisterMessage(CategoryStats.SingleCategoryStats) - -CorrelationStats = _reflection.GeneratedProtocolMessageType( - "CorrelationStats", - (_message.Message,), - { - "DESCRIPTOR": _CORRELATIONSTATS, - "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """A correlation statistics between two series of DataType values. The - series may have differing DataType-s, but within a single series the - DataType must be the same. - - Attributes: - cramers_v: - The correlation value using the Cramer’s V measure. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CorrelationStats) - }, -) -_sym_db.RegisterMessage(CorrelationStats) - - -DESCRIPTOR._options = None -_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY._options = None -_TIMESTAMPSTATS_GRANULARSTATSENTRY._options = None -_STRUCTSTATS_FIELDSTATSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/data_types.proto b/google/cloud/automl_v1beta1/proto/data_types.proto deleted file mode 100644 index 6f77f56b..00000000 --- a/google/cloud/automl_v1beta1/proto/data_types.proto +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// `TypeCode` is used as a part of -// [DataType][google.cloud.automl.v1beta1.DataType]. -enum TypeCode { - // Not specified. Should not be used. - TYPE_CODE_UNSPECIFIED = 0; - - // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or - // `"-Infinity"`. - FLOAT64 = 3; - - // Must be between 0AD and 9999AD. 
Encoded as `string` according to - // [time_format][google.cloud.automl.v1beta1.DataType.time_format], or, if - // that format is not set, then in RFC 3339 `date-time` format, where - // `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). - TIMESTAMP = 4; - - // Encoded as `string`. - STRING = 6; - - // Encoded as `list`, where the list elements are represented according to - // - // [list_element_type][google.cloud.automl.v1beta1.DataType.list_element_type]. - ARRAY = 8; - - // Encoded as `struct`, where field values are represented according to - // [struct_type][google.cloud.automl.v1beta1.DataType.struct_type]. - STRUCT = 9; - - // Values of this type are not further understood by AutoML, - // e.g. AutoML is unable to tell the order of values (as it could with - // FLOAT64), or is unable to say if one value contains another (as it - // could with STRING). - // Encoded as `string` (bytes should be base64-encoded, as described in RFC - // 4648, section 4). - CATEGORY = 10; -} - -// Indicated the type of data that can be stored in a structured data entity -// (e.g. a table). -message DataType { - // Details of DataType-s that need additional specification. - oneof details { - // If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], - // then `list_element_type` is the type of the elements. - DataType list_element_type = 2; - - // If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], then `struct_type` - // provides type information for the struct's fields. - StructType struct_type = 3; - - // If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] - // then `time_format` provides the format in which that time field is - // expressed. The time_format must either be one of: - // * `UNIX_SECONDS` - // * `UNIX_MILLISECONDS` - // * `UNIX_MICROSECONDS` - // * `UNIX_NANOSECONDS` - // (for respectively number of seconds, milliseconds, microseconds and - // nanoseconds since start of the Unix epoch); - // or be written in `strftime` syntax. If time_format is not set, then the - // default format as described on the type_code is used. - string time_format = 5; - } - - // Required. The [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this type. - TypeCode type_code = 1; - - // If true, this DataType can also be `NULL`. In .CSV files `NULL` value is - // expressed as an empty string. - bool nullable = 4; -} - -// `StructType` defines the DataType-s of a [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. -message StructType { - // Unordered map of struct field names to their data types. - // Fields cannot be added or removed via Update. Their names and - // data types are still mutable. - map fields = 1; -} diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2.py b/google/cloud/automl_v1beta1/proto/data_types_pb2.py deleted file mode 100644 index cb1993a8..00000000 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/automl_v1beta1/proto/data_types.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/data_types.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_types.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfc\x01\n\x08\x44\x61taType\x12\x42\n\x11list_element_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataTypeH\x00\x12>\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.StructTypeH\x00\x12\x15\n\x0btime_format\x18\x05 \x01(\tH\x00\x12\x38\n\ttype_code\x18\x01 \x01(\x0e\x32%.google.cloud.automl.v1beta1.TypeCode\x12\x10\n\x08nullable\x18\x04 \x01(\x08\x42\t\n\x07\x64\x65tails"\xa7\x01\n\nStructType\x12\x43\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x33.google.cloud.automl.v1beta1.StructType.FieldsEntry\x1aT\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType:\x02\x38\x01*r\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0c\n\x08\x43\x41TEGORY\x10\nB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - -_TYPECODE = _descriptor.EnumDescriptor( - name="TypeCode", - full_name="google.cloud.automl.v1beta1.TypeCode", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_CODE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FLOAT64", - index=1, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TIMESTAMP", - index=2, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STRING", - index=3, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ARRAY", - index=4, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STRUCT", - index=5, - number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - 
name="CATEGORY", - index=6, - number=10, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=538, - serialized_end=652, -) -_sym_db.RegisterEnumDescriptor(_TYPECODE) - -TypeCode = enum_type_wrapper.EnumTypeWrapper(_TYPECODE) -TYPE_CODE_UNSPECIFIED = 0 -FLOAT64 = 3 -TIMESTAMP = 4 -STRING = 6 -ARRAY = 8 -STRUCT = 9 -CATEGORY = 10 - - -_DATATYPE = _descriptor.Descriptor( - name="DataType", - full_name="google.cloud.automl.v1beta1.DataType", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="list_element_type", - full_name="google.cloud.automl.v1beta1.DataType.list_element_type", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="struct_type", - full_name="google.cloud.automl.v1beta1.DataType.struct_type", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_format", - full_name="google.cloud.automl.v1beta1.DataType.time_format", - index=2, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type_code", - full_name="google.cloud.automl.v1beta1.DataType.type_code", - index=3, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="nullable", - full_name="google.cloud.automl.v1beta1.DataType.nullable", - index=4, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="details", - full_name="google.cloud.automl.v1beta1.DataType.details", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=114, - serialized_end=366, -) - - -_STRUCTTYPE_FIELDSENTRY = _descriptor.Descriptor( - name="FieldsEntry", - full_name="google.cloud.automl.v1beta1.StructType.FieldsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.automl.v1beta1.StructType.FieldsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.StructType.FieldsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=452, - serialized_end=536, -) - -_STRUCTTYPE = _descriptor.Descriptor( - name="StructType", - full_name="google.cloud.automl.v1beta1.StructType", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.cloud.automl.v1beta1.StructType.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_STRUCTTYPE_FIELDSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=369, - serialized_end=536, -) - -_DATATYPE.fields_by_name["list_element_type"].message_type = _DATATYPE -_DATATYPE.fields_by_name["struct_type"].message_type = _STRUCTTYPE -_DATATYPE.fields_by_name["type_code"].enum_type = _TYPECODE -_DATATYPE.oneofs_by_name["details"].fields.append( - _DATATYPE.fields_by_name["list_element_type"] -) -_DATATYPE.fields_by_name[ - "list_element_type" -].containing_oneof = _DATATYPE.oneofs_by_name["details"] -_DATATYPE.oneofs_by_name["details"].fields.append( - _DATATYPE.fields_by_name["struct_type"] -) -_DATATYPE.fields_by_name["struct_type"].containing_oneof = _DATATYPE.oneofs_by_name[ - "details" -] -_DATATYPE.oneofs_by_name["details"].fields.append( - _DATATYPE.fields_by_name["time_format"] -) -_DATATYPE.fields_by_name["time_format"].containing_oneof = _DATATYPE.oneofs_by_name[ - "details" -] -_STRUCTTYPE_FIELDSENTRY.fields_by_name["value"].message_type = _DATATYPE -_STRUCTTYPE_FIELDSENTRY.containing_type = _STRUCTTYPE -_STRUCTTYPE.fields_by_name["fields"].message_type = _STRUCTTYPE_FIELDSENTRY -DESCRIPTOR.message_types_by_name["DataType"] = _DATATYPE -DESCRIPTOR.message_types_by_name["StructType"] = _STRUCTTYPE -DESCRIPTOR.enum_types_by_name["TypeCode"] = _TYPECODE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -DataType = _reflection.GeneratedProtocolMessageType( - "DataType", - (_message.Message,), - { - "DESCRIPTOR": _DATATYPE, - "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", - "__doc__": """Indicated the type of data that can be stored in a structured data - entity (e.g. a table). 
- - Attributes: - details: - Details of DataType-s that need additional specification. - list_element_type: - If [type_code][google.cloud.automl.v1beta1.DataType.type_code] - == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then - ``list_element_type`` is the type of the elements. - struct_type: - If [type_code][google.cloud.automl.v1beta1.DataType.type_code] - == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], then - ``struct_type`` provides type information for the struct’s - fields. - time_format: - If [type_code][google.cloud.automl.v1beta1.DataType.type_code] - == [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] - then ``time_format`` provides the format in which that time - field is expressed. The time_format must either be one of: \* - ``UNIX_SECONDS`` \* ``UNIX_MILLISECONDS`` \* - ``UNIX_MICROSECONDS`` \* ``UNIX_NANOSECONDS`` (for - respectively number of seconds, milliseconds, microseconds and - nanoseconds since start of the Unix epoch); or be written in - ``strftime`` syntax. If time_format is not set, then the - default format as described on the type_code is used. - type_code: - Required. The [TypeCode][google.cloud.automl.v1beta1.TypeCode] - for this type. - nullable: - If true, this DataType can also be ``NULL``. In .CSV files - ``NULL`` value is expressed as an empty string. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DataType) - }, -) -_sym_db.RegisterMessage(DataType) - -StructType = _reflection.GeneratedProtocolMessageType( - "StructType", - (_message.Message,), - { - "FieldsEntry": _reflection.GeneratedProtocolMessageType( - "FieldsEntry", - (_message.Message,), - { - "DESCRIPTOR": _STRUCTTYPE_FIELDSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructType.FieldsEntry) - }, - ), - "DESCRIPTOR": _STRUCTTYPE, - "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", - "__doc__": """\ ``StructType`` defines the DataType-s of a - [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. - - Attributes: - fields: - Unordered map of struct field names to their data types. - Fields cannot be added or removed via Update. Their names and - data types are still mutable. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructType) - }, -) -_sym_db.RegisterMessage(StructType) -_sym_db.RegisterMessage(StructType.FieldsEntry) - - -DESCRIPTOR._options = None -_STRUCTTYPE_FIELDSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/data_types_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/dataset.proto b/google/cloud/automl_v1beta1/proto/dataset.proto deleted file mode 100644 index 8d1b8d93..00000000 --- a/google/cloud/automl_v1beta1/proto/dataset.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/resource.proto"; -import "google/cloud/automl/v1beta1/image.proto"; -import "google/cloud/automl/v1beta1/tables.proto"; -import "google/cloud/automl/v1beta1/text.proto"; -import "google/cloud/automl/v1beta1/translation.proto"; -import "google/cloud/automl/v1beta1/video.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// A workspace for solving a single, particular machine learning (ML) problem. -// A workspace contains examples that may be annotated. -message Dataset { - option (google.api.resource) = { - type: "automl.googleapis.com/Dataset" - pattern: "projects/{project}/locations/{location}/datasets/{dataset}" - }; - - // Required. - // The dataset metadata that is specific to the problem type. - oneof dataset_metadata { - // Metadata for a dataset used for translation. - TranslationDatasetMetadata translation_dataset_metadata = 23; - - // Metadata for a dataset used for image classification. - ImageClassificationDatasetMetadata image_classification_dataset_metadata = 24; - - // Metadata for a dataset used for text classification. - TextClassificationDatasetMetadata text_classification_dataset_metadata = 25; - - // Metadata for a dataset used for image object detection. - ImageObjectDetectionDatasetMetadata image_object_detection_dataset_metadata = 26; - - // Metadata for a dataset used for video classification. - VideoClassificationDatasetMetadata video_classification_dataset_metadata = 31; - - // Metadata for a dataset used for video object tracking. - VideoObjectTrackingDatasetMetadata video_object_tracking_dataset_metadata = 29; - - // Metadata for a dataset used for text extraction. - TextExtractionDatasetMetadata text_extraction_dataset_metadata = 28; - - // Metadata for a dataset used for text sentiment. - TextSentimentDatasetMetadata text_sentiment_dataset_metadata = 30; - - // Metadata for a dataset used for Tables. - TablesDatasetMetadata tables_dataset_metadata = 33; - } - - // Output only. The resource name of the dataset. - // Form: `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}` - string name = 1; - - // Required. The name of the dataset to show in the interface. The name can be - // up to 32 characters long and can consist only of ASCII Latin letters A-Z - // and a-z, underscores - // (_), and ASCII digits 0-9. - string display_name = 2; - - // User-provided description of the dataset. The description can be up to - // 25000 characters long. - string description = 3; - - // Output only. The number of examples in the dataset. - int32 example_count = 21; - - // Output only. Timestamp when this dataset was created. 
- google.protobuf.Timestamp create_time = 14; - - // Used to perform consistent read-modify-write updates. If not set, a blind - // "overwrite" update happens. - string etag = 17; -} diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2.py b/google/cloud/automl_v1beta1/proto/dataset_pb2.py deleted file mode 100644 index 28aa5238..00000000 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2.py +++ /dev/null @@ -1,533 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/dataset.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - tables_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - translation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - video_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/dataset.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b"\n/google/cloud/automl_v1beta1/proto/dataset.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto\"\xce\t\n\x07\x44\x61taset\x12_\n\x1ctranslation_dataset_metadata\x18\x17 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TranslationDatasetMetadataH\x00\x12p\n%image_classification_dataset_metadata\x18\x18 \x01(\x0b\x32?.google.cloud.automl.v1beta1.ImageClassificationDatasetMetadataH\x00\x12n\n$text_classification_dataset_metadata\x18\x19 \x01(\x0b\x32>.google.cloud.automl.v1beta1.TextClassificationDatasetMetadataH\x00\x12s\n'image_object_detection_dataset_metadata\x18\x1a \x01(\x0b\x32@.google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadataH\x00\x12p\n%video_classification_dataset_metadata\x18\x1f \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoClassificationDatasetMetadataH\x00\x12q\n&video_object_tracking_dataset_metadata\x18\x1d \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadataH\x00\x12\x66\n 
text_extraction_dataset_metadata\x18\x1c \x01(\x0b\x32:.google.cloud.automl.v1beta1.TextExtractionDatasetMetadataH\x00\x12\x64\n\x1ftext_sentiment_dataset_metadata\x18\x1e \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TextSentimentDatasetMetadataH\x00\x12U\n\x17tables_dataset_metadata\x18! \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesDatasetMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x15\n\rexample_count\x18\x15 \x01(\x05\x12/\n\x0b\x63reate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04\x65tag\x18\x11 \x01(\t:^\xea\x41[\n\x1d\x61utoml.googleapis.com/Dataset\x12:projects/{project}/locations/{location}/datasets/{dataset}B\x12\n\x10\x64\x61taset_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3", - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_DATASET = _descriptor.Descriptor( - name="Dataset", - full_name="google.cloud.automl.v1beta1.Dataset", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="translation_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.translation_dataset_metadata", - index=0, - number=23, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_classification_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.image_classification_dataset_metadata", - index=1, - number=24, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_classification_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.text_classification_dataset_metadata", - index=2, - number=25, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_object_detection_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.image_object_detection_dataset_metadata", - index=3, - number=26, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, 
- containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_classification_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.video_classification_dataset_metadata", - index=4, - number=31, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_object_tracking_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.video_object_tracking_dataset_metadata", - index=5, - number=29, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_extraction_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.text_extraction_dataset_metadata", - index=6, - number=28, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_sentiment_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.text_sentiment_dataset_metadata", - index=7, - number=30, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tables_dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.tables_dataset_metadata", - index=8, - number=33, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.Dataset.name", - index=9, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.Dataset.display_name", - index=10, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.cloud.automl.v1beta1.Dataset.description", - index=11, - 
number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="example_count", - full_name="google.cloud.automl.v1beta1.Dataset.example_count", - index=12, - number=21, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.cloud.automl.v1beta1.Dataset.create_time", - index=13, - number=14, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="etag", - full_name="google.cloud.automl.v1beta1.Dataset.etag", - index=14, - number=17, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\352A[\n\035automl.googleapis.com/Dataset\022:projects/{project}/locations/{location}/datasets/{dataset}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="dataset_metadata", - full_name="google.cloud.automl.v1beta1.Dataset.dataset_metadata", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=412, - serialized_end=1642, -) - -_DATASET.fields_by_name[ - "translation_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2._TRANSLATIONDATASETMETADATA -) -_DATASET.fields_by_name[ - "image_classification_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGECLASSIFICATIONDATASETMETADATA -) -_DATASET.fields_by_name[ - "text_classification_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2._TEXTCLASSIFICATIONDATASETMETADATA -) -_DATASET.fields_by_name[ - "image_object_detection_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGEOBJECTDETECTIONDATASETMETADATA -) -_DATASET.fields_by_name[ - "video_classification_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2._VIDEOCLASSIFICATIONDATASETMETADATA -) -_DATASET.fields_by_name[ - "video_object_tracking_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2._VIDEOOBJECTTRACKINGDATASETMETADATA -) -_DATASET.fields_by_name[ - "text_extraction_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2._TEXTEXTRACTIONDATASETMETADATA -) -_DATASET.fields_by_name[ - "text_sentiment_dataset_metadata" 
-].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2._TEXTSENTIMENTDATASETMETADATA -) -_DATASET.fields_by_name[ - "tables_dataset_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2._TABLESDATASETMETADATA -) -_DATASET.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["translation_dataset_metadata"] -) -_DATASET.fields_by_name[ - "translation_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["image_classification_dataset_metadata"] -) -_DATASET.fields_by_name[ - "image_classification_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["text_classification_dataset_metadata"] -) -_DATASET.fields_by_name[ - "text_classification_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["image_object_detection_dataset_metadata"] -) -_DATASET.fields_by_name[ - "image_object_detection_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["video_classification_dataset_metadata"] -) -_DATASET.fields_by_name[ - "video_classification_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["video_object_tracking_dataset_metadata"] -) -_DATASET.fields_by_name[ - "video_object_tracking_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["text_extraction_dataset_metadata"] -) -_DATASET.fields_by_name[ - "text_extraction_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["text_sentiment_dataset_metadata"] -) -_DATASET.fields_by_name[ - "text_sentiment_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -_DATASET.oneofs_by_name["dataset_metadata"].fields.append( - _DATASET.fields_by_name["tables_dataset_metadata"] -) -_DATASET.fields_by_name[ - "tables_dataset_metadata" -].containing_oneof = _DATASET.oneofs_by_name["dataset_metadata"] -DESCRIPTOR.message_types_by_name["Dataset"] = _DATASET -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Dataset = _reflection.GeneratedProtocolMessageType( - "Dataset", - (_message.Message,), - { - "DESCRIPTOR": _DATASET, - "__module__": "google.cloud.automl_v1beta1.proto.dataset_pb2", - "__doc__": """A workspace for solving a single, particular machine learning (ML) - problem. A workspace contains examples that may be annotated. - - Attributes: - dataset_metadata: - Required. The dataset metadata that is specific to the problem - type. - translation_dataset_metadata: - Metadata for a dataset used for translation. - image_classification_dataset_metadata: - Metadata for a dataset used for image classification. - text_classification_dataset_metadata: - Metadata for a dataset used for text classification. 
- image_object_detection_dataset_metadata: - Metadata for a dataset used for image object detection. - video_classification_dataset_metadata: - Metadata for a dataset used for video classification. - video_object_tracking_dataset_metadata: - Metadata for a dataset used for video object tracking. - text_extraction_dataset_metadata: - Metadata for a dataset used for text extraction. - text_sentiment_dataset_metadata: - Metadata for a dataset used for text sentiment. - tables_dataset_metadata: - Metadata for a dataset used for Tables. - name: - Output only. The resource name of the dataset. Form: ``project - s/{project_id}/locations/{location_id}/datasets/{dataset_id}`` - display_name: - Required. The name of the dataset to show in the interface. - The name can be up to 32 characters long and can consist only - of ASCII Latin letters A-Z and a-z, underscores (_), and ASCII - digits 0-9. - description: - User-provided description of the dataset. The description can - be up to 25000 characters long. - example_count: - Output only. The number of examples in the dataset. - create_time: - Output only. Timestamp when this dataset was created. - etag: - Used to perform consistent read-modify-write updates. If not - set, a blind “overwrite” update happens. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Dataset) - }, -) -_sym_db.RegisterMessage(Dataset) - - -DESCRIPTOR._options = None -_DATASET._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/dataset_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/detection.proto b/google/cloud/automl_v1beta1/proto/detection.proto deleted file mode 100644 index c5864e20..00000000 --- a/google/cloud/automl_v1beta1/proto/detection.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/geometry.proto"; -import "google/protobuf/duration.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Annotation details for image object detection. -message ImageObjectDetectionAnnotation { - // Output only. The rectangle representing the object location. - BoundingPoly bounding_box = 1; - - // Output only. The confidence that this annotation is positive for the parent example, - // value in [0, 1], higher means higher positivity confidence. 
- float score = 2; -} - -// Annotation details for video object tracking. -message VideoObjectTrackingAnnotation { - // Optional. The instance of the object, expressed as a positive integer. Used to tell - // apart objects of the same type (i.e. AnnotationSpec) when multiple are - // present on a single example. - // NOTE: Instance ID prediction quality is not a part of model evaluation and - // is done as best effort. Especially in cases when an entity goes - // off-screen for a longer time (minutes), when it comes back it may be given - // a new instance ID. - string instance_id = 1; - - // Required. A time (frame) of a video to which this annotation pertains. - // Represented as the duration since the video's start. - google.protobuf.Duration time_offset = 2; - - // Required. The rectangle representing the object location on the frame (i.e. - // at the time_offset of the video). - BoundingPoly bounding_box = 3; - - // Output only. The confidence that this annotation is positive for the video at - // the time_offset, value in [0, 1], higher means higher positivity - // confidence. For annotations created by the user the score is 1. When - // user approves an annotation, the original float score is kept (and not - // changed to 1). - float score = 4; -} - -// Bounding box matching model metrics for a single intersection-over-union -// threshold and multiple label match confidence thresholds. -message BoundingBoxMetricsEntry { - // Metrics for a single confidence threshold. - message ConfidenceMetricsEntry { - // Output only. The confidence threshold value used to compute the metrics. - float confidence_threshold = 1; - - // Output only. Recall under the given confidence threshold. - float recall = 2; - - // Output only. Precision under the given confidence threshold. - float precision = 3; - - // Output only. The harmonic mean of recall and precision. - float f1_score = 4; - } - - // Output only. The intersection-over-union threshold value used to compute - // this metrics entry. - float iou_threshold = 1; - - // Output only. The mean average precision, most often close to au_prc. - float mean_average_precision = 2; - - // Output only. Metrics for each label-match confidence_threshold from - // 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is - // derived from them. - repeated ConfidenceMetricsEntry confidence_metrics_entries = 3; -} - -// Model evaluation metrics for image object detection problems. -// Evaluates prediction quality of labeled bounding boxes. -message ImageObjectDetectionEvaluationMetrics { - // Output only. The total number of bounding boxes (i.e. summed over all - // images) the ground truth used to create this evaluation had. - int32 evaluated_bounding_box_count = 1; - - // Output only. The bounding boxes match metrics for each - // Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - // and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - // pair. - repeated BoundingBoxMetricsEntry bounding_box_metrics_entries = 2; - - // Output only. The single metric for bounding boxes evaluation: - // the mean_average_precision averaged over all bounding_box_metrics_entries. - float bounding_box_mean_average_precision = 3; -} - -// Model evaluation metrics for video object tracking problems. -// Evaluates prediction quality of both labeled bounding boxes and labeled -// tracks (i.e. series of bounding boxes sharing same label and instance ID). -message VideoObjectTrackingEvaluationMetrics { - // Output only. 
The number of video frames used to create this evaluation. - int32 evaluated_frame_count = 1; - - // Output only. The total number of bounding boxes (i.e. summed over all - // frames) the ground truth used to create this evaluation had. - int32 evaluated_bounding_box_count = 2; - - // Output only. The bounding boxes match metrics for each - // Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - // and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - // pair. - repeated BoundingBoxMetricsEntry bounding_box_metrics_entries = 4; - - // Output only. The single metric for bounding boxes evaluation: - // the mean_average_precision averaged over all bounding_box_metrics_entries. - float bounding_box_mean_average_precision = 6; -} diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2.py b/google/cloud/automl_v1beta1/proto/detection_pb2.py deleted file mode 100644 index 940fac4d..00000000 --- a/google/cloud/automl_v1beta1/proto/detection_pb2.py +++ /dev/null @@ -1,757 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/detection.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - geometry_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/detection.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n1google/cloud/automl_v1beta1/proto/detection.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"p\n\x1eImageObjectDetectionAnnotation\x12?\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x02 \x01(\x02"\xb4\x01\n\x1dVideoObjectTrackingAnnotation\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12?\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x04 \x01(\x02"\xae\x02\n\x17\x42oundingBoxMetricsEntry\x12\x15\n\riou_threshold\x18\x01 \x01(\x02\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12o\n\x1a\x63onfidence_metrics_entries\x18\x03 \x03(\x0b\x32K.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02"\xd6\x01\n%ImageObjectDetectionEvaluationMetrics\x12$\n\x1c\x65valuated_bounding_box_count\x18\x01 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x02 
\x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x03 \x01(\x02"\xf4\x01\n$VideoObjectTrackingEvaluationMetrics\x12\x1d\n\x15\x65valuated_frame_count\x18\x01 \x01(\x05\x12$\n\x1c\x65valuated_bounding_box_count\x18\x02 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x04 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x06 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_IMAGEOBJECTDETECTIONANNOTATION = _descriptor.Descriptor( - name="ImageObjectDetectionAnnotation", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="bounding_box", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionAnnotation.bounding_box", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="score", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionAnnotation.score", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=194, - serialized_end=306, -) - - -_VIDEOOBJECTTRACKINGANNOTATION = _descriptor.Descriptor( - name="VideoObjectTrackingAnnotation", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation.instance_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_offset", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation.time_offset", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="bounding_box", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation.bounding_box", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="score", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation.score", - index=3, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=309, - serialized_end=489, -) - - -_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY = _descriptor.Descriptor( - name="ConfidenceMetricsEntry", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="confidence_threshold", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry.confidence_threshold", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recall", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry.recall", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="precision", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry.precision", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="f1_score", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry.f1_score", - index=3, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=687, - serialized_end=794, -) - 
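The `BoundingBoxMetricsEntry` message deleted above reports detection quality once per IoU threshold, with a nested `ConfidenceMetricsEntry` for each confidence cutoff; the precision-recall curve is derived from those nested entries. As a rough sketch of reading the same metrics back through the 2.x request-object calling style (this example is not from the original samples; `YOUR_PROJECT_ID` and `YOUR_MODEL_ID` are placeholders, and a trained image object detection model is assumed):

```py
from google.cloud import automl_v1beta1 as automl

project_id = "YOUR_PROJECT_ID"  # placeholder
model_id = "YOUR_MODEL_ID"  # placeholder

client = automl.AutoMlClient()
model_full_id = client.model_path(project_id, "us-central1", model_id)

# Each ModelEvaluation for a detection model carries the
# ImageObjectDetectionEvaluationMetrics message shown in this hunk.
for evaluation in client.list_model_evaluations(
    request={"parent": model_full_id, "filter": ""}
):
    metrics = evaluation.image_object_detection_evaluation_metrics
    for entry in metrics.bounding_box_metrics_entries:
        print(f"IoU {entry.iou_threshold:.2f}: mAP {entry.mean_average_precision:.3f}")
        for cm in entry.confidence_metrics_entries:
            print(
                f"  conf>={cm.confidence_threshold:.2f} "
                f"precision={cm.precision:.3f} recall={cm.recall:.3f} f1={cm.f1_score:.3f}"
            )
```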
-_BOUNDINGBOXMETRICSENTRY = _descriptor.Descriptor( - name="BoundingBoxMetricsEntry", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="iou_threshold", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.iou_threshold", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mean_average_precision", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.mean_average_precision", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="confidence_metrics_entries", - full_name="google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.confidence_metrics_entries", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=492, - serialized_end=794, -) - - -_IMAGEOBJECTDETECTIONEVALUATIONMETRICS = _descriptor.Descriptor( - name="ImageObjectDetectionEvaluationMetrics", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="evaluated_bounding_box_count", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics.evaluated_bounding_box_count", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bounding_box_metrics_entries", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics.bounding_box_metrics_entries", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bounding_box_mean_average_precision", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics.bounding_box_mean_average_precision", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=797, - serialized_end=1011, -) - - -_VIDEOOBJECTTRACKINGEVALUATIONMETRICS = _descriptor.Descriptor( - name="VideoObjectTrackingEvaluationMetrics", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="evaluated_frame_count", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics.evaluated_frame_count", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="evaluated_bounding_box_count", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics.evaluated_bounding_box_count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bounding_box_metrics_entries", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics.bounding_box_metrics_entries", - index=2, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bounding_box_mean_average_precision", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics.bounding_box_mean_average_precision", - index=3, - number=6, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1014, - serialized_end=1258, -) - -_IMAGEOBJECTDETECTIONANNOTATION.fields_by_name[ - "bounding_box" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY -) -_VIDEOOBJECTTRACKINGANNOTATION.fields_by_name[ - "time_offset" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_VIDEOOBJECTTRACKINGANNOTATION.fields_by_name[ - "bounding_box" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY -) -_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY.containing_type = ( - _BOUNDINGBOXMETRICSENTRY -) -_BOUNDINGBOXMETRICSENTRY.fields_by_name[ - 
"confidence_metrics_entries" -].message_type = _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY -_IMAGEOBJECTDETECTIONEVALUATIONMETRICS.fields_by_name[ - "bounding_box_metrics_entries" -].message_type = _BOUNDINGBOXMETRICSENTRY -_VIDEOOBJECTTRACKINGEVALUATIONMETRICS.fields_by_name[ - "bounding_box_metrics_entries" -].message_type = _BOUNDINGBOXMETRICSENTRY -DESCRIPTOR.message_types_by_name[ - "ImageObjectDetectionAnnotation" -] = _IMAGEOBJECTDETECTIONANNOTATION -DESCRIPTOR.message_types_by_name[ - "VideoObjectTrackingAnnotation" -] = _VIDEOOBJECTTRACKINGANNOTATION -DESCRIPTOR.message_types_by_name["BoundingBoxMetricsEntry"] = _BOUNDINGBOXMETRICSENTRY -DESCRIPTOR.message_types_by_name[ - "ImageObjectDetectionEvaluationMetrics" -] = _IMAGEOBJECTDETECTIONEVALUATIONMETRICS -DESCRIPTOR.message_types_by_name[ - "VideoObjectTrackingEvaluationMetrics" -] = _VIDEOOBJECTTRACKINGEVALUATIONMETRICS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ImageObjectDetectionAnnotation = _reflection.GeneratedProtocolMessageType( - "ImageObjectDetectionAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Annotation details for image object detection. - - Attributes: - bounding_box: - Output only. The rectangle representing the object location. - score: - Output only. The confidence that this annotation is positive - for the parent example, value in [0, 1], higher means higher - positivity confidence. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionAnnotation) - }, -) -_sym_db.RegisterMessage(ImageObjectDetectionAnnotation) - -VideoObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType( - "VideoObjectTrackingAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOOBJECTTRACKINGANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Annotation details for video object tracking. - - Attributes: - instance_id: - Optional. The instance of the object, expressed as a positive - integer. Used to tell apart objects of the same type - (i.e. AnnotationSpec) when multiple are present on a single - example. NOTE: Instance ID prediction quality is not a part of - model evaluation and is done as best effort. Especially in - cases when an entity goes off-screen for a longer time - (minutes), when it comes back it may be given a new instance - ID. - time_offset: - Required. A time (frame) of a video to which this annotation - pertains. Represented as the duration since the video’s start. - bounding_box: - Required. The rectangle representing the object location on - the frame ( i.e. at the time_offset of the video). - score: - Output only. The confidence that this annotation is positive - for the video at the time_offset, value in [0, 1], higher - means higher positivity confidence. For annotations created by - the user the score is 1. When user approves an annotation, the - original float score is kept (and not changed to 1). 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation) - }, -) -_sym_db.RegisterMessage(VideoObjectTrackingAnnotation) - -BoundingBoxMetricsEntry = _reflection.GeneratedProtocolMessageType( - "BoundingBoxMetricsEntry", - (_message.Message,), - { - "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( - "ConfidenceMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Metrics for a single confidence threshold. - - Attributes: - confidence_threshold: - Output only. The confidence threshold value used to compute - the metrics. - recall: - Output only. Recall under the given confidence threshold. - precision: - Output only. Precision under the given confidence threshold. - f1_score: - Output only. The harmonic mean of recall and precision. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry) - }, - ), - "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Bounding box matching model metrics for a single intersection-over- - union threshold and multiple label match confidence thresholds. - - Attributes: - iou_threshold: - Output only. The intersection-over-union threshold value used - to compute this metrics entry. - mean_average_precision: - Output only. The mean average precision, most often close to - au_prc. - confidence_metrics_entries: - Output only. Metrics for each label-match confidence_threshold - from 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99. Precision-recall - curve is derived from them. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingBoxMetricsEntry) - }, -) -_sym_db.RegisterMessage(BoundingBoxMetricsEntry) -_sym_db.RegisterMessage(BoundingBoxMetricsEntry.ConfidenceMetricsEntry) - -ImageObjectDetectionEvaluationMetrics = _reflection.GeneratedProtocolMessageType( - "ImageObjectDetectionEvaluationMetrics", - (_message.Message,), - { - "DESCRIPTOR": _IMAGEOBJECTDETECTIONEVALUATIONMETRICS, - "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Model evaluation metrics for image object detection problems. - Evaluates prediction quality of labeled bounding boxes. - - Attributes: - evaluated_bounding_box_count: - Output only. The total number of bounding boxes (i.e. summed - over all images) the ground truth used to create this - evaluation had. - bounding_box_metrics_entries: - Output only. The bounding boxes match metrics for each - Intersection-over-union threshold - 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and each label confidence - threshold 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 pair. - bounding_box_mean_average_precision: - Output only. The single metric for bounding boxes evaluation: - the mean_average_precision averaged over all - bounding_box_metrics_entries. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics) - }, -) -_sym_db.RegisterMessage(ImageObjectDetectionEvaluationMetrics) - -VideoObjectTrackingEvaluationMetrics = _reflection.GeneratedProtocolMessageType( - "VideoObjectTrackingEvaluationMetrics", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOOBJECTTRACKINGEVALUATIONMETRICS, - "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Model evaluation metrics for video object tracking problems. 
Evaluates - prediction quality of both labeled bounding boxes and labeled tracks - (i.e. series of bounding boxes sharing same label and instance ID). - - Attributes: - evaluated_frame_count: - Output only. The number of video frames used to create this - evaluation. - evaluated_bounding_box_count: - Output only. The total number of bounding boxes (i.e. summed - over all frames) the ground truth used to create this - evaluation had. - bounding_box_metrics_entries: - Output only. The bounding boxes match metrics for each - Intersection-over-union threshold - 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and each label confidence - threshold 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 pair. - bounding_box_mean_average_precision: - Output only. The single metric for bounding boxes evaluation: - the mean_average_precision averaged over all - bounding_box_metrics_entries. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics) - }, -) -_sym_db.RegisterMessage(VideoObjectTrackingEvaluationMetrics) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/detection_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/detection_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/geometry.proto b/google/cloud/automl_v1beta1/proto/geometry.proto deleted file mode 100644 index d5654aac..00000000 --- a/google/cloud/automl_v1beta1/proto/geometry.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// A vertex represents a 2D point in the image. -// The normalized vertex coordinates are between 0 to 1 fractions relative to -// the original plane (image, video). E.g. if the plane (e.g. whole image) would -// have size 10 x 20 then a point with normalized coordinates (0.1, 0.3) would -// be at the position (1, 6) on that plane. -message NormalizedVertex { - // Required. Horizontal coordinate. - float x = 1; - - // Required. Vertical coordinate. - float y = 2; -} - -// A bounding polygon of a detected object on a plane. -// On output both vertices and normalized_vertices are provided. -// The polygon is formed by connecting vertices in the order they are listed. -message BoundingPoly { - // Output only . The bounding polygon normalized vertices. 
- repeated NormalizedVertex normalized_vertices = 2; -} diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2.py b/google/cloud/automl_v1beta1/proto/geometry_pb2.py deleted file mode 100644 index 2d355059..00000000 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2.py +++ /dev/null @@ -1,172 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/geometry.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/geometry.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/geometry.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"Z\n\x0c\x42oundingPoly\x12J\n\x13normalized_vertices\x18\x02 \x03(\x0b\x32-.google.cloud.automl.v1beta1.NormalizedVertexB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - - -_NORMALIZEDVERTEX = _descriptor.Descriptor( - name="NormalizedVertex", - full_name="google.cloud.automl.v1beta1.NormalizedVertex", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="x", - full_name="google.cloud.automl.v1beta1.NormalizedVertex.x", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="y", - full_name="google.cloud.automl.v1beta1.NormalizedVertex.y", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=111, - serialized_end=151, -) - - -_BOUNDINGPOLY = _descriptor.Descriptor( - name="BoundingPoly", - full_name="google.cloud.automl.v1beta1.BoundingPoly", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="normalized_vertices", - 
full_name="google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices", - index=0, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=153, - serialized_end=243, -) - -_BOUNDINGPOLY.fields_by_name["normalized_vertices"].message_type = _NORMALIZEDVERTEX -DESCRIPTOR.message_types_by_name["NormalizedVertex"] = _NORMALIZEDVERTEX -DESCRIPTOR.message_types_by_name["BoundingPoly"] = _BOUNDINGPOLY -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -NormalizedVertex = _reflection.GeneratedProtocolMessageType( - "NormalizedVertex", - (_message.Message,), - { - "DESCRIPTOR": _NORMALIZEDVERTEX, - "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", - "__doc__": """Required. Horizontal coordinate. - - Attributes: - y: - Required. Vertical coordinate. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.NormalizedVertex) - }, -) -_sym_db.RegisterMessage(NormalizedVertex) - -BoundingPoly = _reflection.GeneratedProtocolMessageType( - "BoundingPoly", - (_message.Message,), - { - "DESCRIPTOR": _BOUNDINGPOLY, - "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", - "__doc__": """A bounding polygon of a detected object on a plane. On output both - vertices and normalized_vertices are provided. The polygon is formed - by connecting vertices in the order they are listed. - - Attributes: - normalized_vertices: - Output only . The bounding polygon normalized vertices. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingPoly) - }, -) -_sym_db.RegisterMessage(BoundingPoly) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/geometry_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/image.proto b/google/cloud/automl_v1beta1/proto/image.proto deleted file mode 100644 index 960eaeb0..00000000 --- a/google/cloud/automl_v1beta1/proto/image.proto +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
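The `NormalizedVertex` message from the deleted geometry.proto above stores coordinates as 0-1 fractions of the plane size. A minimal standalone sketch of that convention (plain Python, no client needed; `to_absolute` is a hypothetical helper, not part of the library):

```py
def to_absolute(normalized_vertices, width, height):
    """Scale normalized (x, y) fractions back to plane coordinates."""
    return [(x * width, y * height) for (x, y) in normalized_vertices]

# Per the proto comment: on a 10 x 20 plane, (0.1, 0.3) lands at (1, 6).
print(to_absolute([(0.1, 0.3)], 10, 20))  # [(1.0, 6.0)]
```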
- -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/resource.proto"; -import "google/cloud/automl/v1beta1/annotation_spec.proto"; -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_outer_classname = "ImageProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Dataset metadata that is specific to image classification. -message ImageClassificationDatasetMetadata { - // Required. Type of the classification problem. - ClassificationType classification_type = 1; -} - -// Dataset metadata specific to image object detection. -message ImageObjectDetectionDatasetMetadata { - -} - -// Model metadata for image classification. -message ImageClassificationModelMetadata { - // Optional. The ID of the `base` model. If it is specified, the new model - // will be created based on the `base` model. Otherwise, the new model will be - // created from scratch. The `base` model must be in the same - // `project` and `location` as the new model to create, and have the same - // `model_type`. - string base_model_id = 1; - - // Required. The train budget of creating this model, expressed in hours. The - // actual `train_cost` will be equal or less than this value. - int64 train_budget = 2; - - // Output only. The actual train cost of creating this model, expressed in - // hours. If this model is created from a `base` model, the train cost used - // to create the `base` model are not included. - int64 train_cost = 3; - - // Output only. The reason that this create model operation stopped, - // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`. - string stop_reason = 5; - - // Optional. Type of the model. The available values are: - // * `cloud` - Model to be used via prediction calls to AutoML API. - // This is the default value. - // * `mobile-low-latency-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device - // with TensorFlow afterwards. Expected to have low latency, but - // may have lower prediction quality than other models. - // * `mobile-versatile-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device - // with TensorFlow afterwards. - // * `mobile-high-accuracy-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device - // with TensorFlow afterwards. Expected to have a higher - // latency, but should also have a higher prediction quality - // than other models. - // * `mobile-core-ml-low-latency-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core - // ML afterwards. Expected to have low latency, but may have - // lower prediction quality than other models. 
- // * `mobile-core-ml-versatile-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core - // ML afterwards. - // * `mobile-core-ml-high-accuracy-1` - A model that, in addition to - // providing prediction via AutoML API, can also be exported - // (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with - // Core ML afterwards. Expected to have a higher latency, but - // should also have a higher prediction quality than other - // models. - string model_type = 7; - - // Output only. An approximate number of online prediction QPS that can - // be supported by this model per each node on which it is deployed. - double node_qps = 13; - - // Output only. The number of nodes this model is deployed on. A node is an - // abstraction of a machine resource, which can handle online prediction QPS - // as given in the node_qps field. - int64 node_count = 14; -} - -// Model metadata specific to image object detection. -message ImageObjectDetectionModelMetadata { - // Optional. Type of the model. The available values are: - // * `cloud-high-accuracy-1` - (default) A model to be used via prediction - // calls to AutoML API. Expected to have a higher latency, but - // should also have a higher prediction quality than other - // models. - // * `cloud-low-latency-1` - A model to be used via prediction - // calls to AutoML API. Expected to have low latency, but may - // have lower prediction quality than other models. - // * `mobile-low-latency-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device - // with TensorFlow afterwards. Expected to have low latency, but - // may have lower prediction quality than other models. - // * `mobile-versatile-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device - // with TensorFlow afterwards. - // * `mobile-high-accuracy-1` - A model that, in addition to providing - // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device - // with TensorFlow afterwards. Expected to have a higher - // latency, but should also have a higher prediction quality - // than other models. - string model_type = 1; - - // Output only. The number of nodes this model is deployed on. A node is an - // abstraction of a machine resource, which can handle online prediction QPS - // as given in the qps_per_node field. - int64 node_count = 3; - - // Output only. An approximate number of online prediction QPS that can - // be supported by this model per each node on which it is deployed. - double node_qps = 4; - - // Output only. The reason that this create model operation stopped, - // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`. - string stop_reason = 5; - - // The train budget of creating this model, expressed in milli node - // hours i.e. 1,000 value in this field means 1 node hour. The actual - // `train_cost` will be equal or less than this value. 
If further model - training ceases to provide any improvements, it will stop without using - full budget and the stop_reason will be `MODEL_CONVERGED`. - // Note, node_hour = actual_hour * number_of_nodes_involved. - // For model type `cloud-high-accuracy-1`(default) and `cloud-low-latency-1`, - // the train budget must be between 20,000 and 900,000 milli node hours, - // inclusive. The default value is 216,000 which represents one day in - // wall time. - // For model type `mobile-low-latency-1`, `mobile-versatile-1`, - // `mobile-high-accuracy-1`, `mobile-core-ml-low-latency-1`, - // `mobile-core-ml-versatile-1`, `mobile-core-ml-high-accuracy-1`, the train - // budget must be between 1,000 and 100,000 milli node hours, inclusive. - // The default value is 24,000 which represents one day in wall time. - int64 train_budget_milli_node_hours = 6; - - // Output only. The actual train cost of creating this model, expressed in - // milli node hours, i.e. 1,000 value in this field means 1 node hour. - // Guaranteed to not exceed the train budget. - int64 train_cost_milli_node_hours = 7; -} - -// Model deployment metadata specific to Image Classification. -message ImageClassificationModelDeploymentMetadata { - // Input only. The number of nodes to deploy the model on. A node is an - // abstraction of a machine resource, which can handle online prediction QPS - // as given in the model's - // - // [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps]. - // Must be between 1 and 100, inclusive on both ends. - int64 node_count = 1; -} - -// Model deployment metadata specific to Image Object Detection. -message ImageObjectDetectionModelDeploymentMetadata { - // Input only. The number of nodes to deploy the model on. A node is an - // abstraction of a machine resource, which can handle online prediction QPS - // as given in the model's - // - // [qps_per_node][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.qps_per_node]. - // Must be between 1 and 100, inclusive on both ends. - int64 node_count = 1; -} diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py deleted file mode 100644 index 6f17f2c5..00000000 --- a/google/cloud/automl_v1beta1/proto/image_pb2.py +++ /dev/null @@ -1,726 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: google/cloud/automl_v1beta1/proto/image.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - annotation_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/image.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e \x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_IMAGECLASSIFICATIONDATASETMETADATA = _descriptor.Descriptor( - name="ImageClassificationDatasetMetadata", - full_name="google.cloud.automl.v1beta1.ImageClassificationDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="classification_type", - full_name="google.cloud.automl.v1beta1.ImageClassificationDatasetMetadata.classification_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=281, - serialized_end=395, -) - - -_IMAGEOBJECTDETECTIONDATASETMETADATA = _descriptor.Descriptor( - name="ImageObjectDetectionDatasetMetadata", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=397, - serialized_end=434, -) - - -_IMAGECLASSIFICATIONMODELMETADATA = _descriptor.Descriptor( - name="ImageClassificationModelMetadata", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="base_model_id", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.base_model_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="train_budget", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.train_budget", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="train_cost", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.train_cost", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="stop_reason", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.stop_reason", - index=3, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="model_type", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.model_type", - 
index=4, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="node_qps", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps", - index=5, - number=13, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="node_count", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_count", - index=6, - number=14, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=437, - serialized_end=615, -) - - -_IMAGEOBJECTDETECTIONMODELMETADATA = _descriptor.Descriptor( - name="ImageObjectDetectionModelMetadata", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="model_type", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.model_type", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="node_count", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.node_count", - index=1, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="node_qps", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.node_qps", - index=2, - number=4, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="stop_reason", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.stop_reason", - index=3, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="train_budget_milli_node_hours", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.train_budget_milli_node_hours", - index=4, - number=6, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="train_cost_milli_node_hours", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.train_cost_milli_node_hours", - index=5, - number=7, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=618, - serialized_end=808, -) - - -_IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA = _descriptor.Descriptor( - name="ImageClassificationModelDeploymentMetadata", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="node_count", - full_name="google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata.node_count", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=810, - serialized_end=874, -) - - -_IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA = _descriptor.Descriptor( - name="ImageObjectDetectionModelDeploymentMetadata", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="node_count", - full_name="google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_count", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=876, - serialized_end=941, -) - -_IMAGECLASSIFICATIONDATASETMETADATA.fields_by_name[ - "classification_type" -].enum_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONTYPE -) -DESCRIPTOR.message_types_by_name[ - "ImageClassificationDatasetMetadata" -] = _IMAGECLASSIFICATIONDATASETMETADATA 
-DESCRIPTOR.message_types_by_name[ - "ImageObjectDetectionDatasetMetadata" -] = _IMAGEOBJECTDETECTIONDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "ImageClassificationModelMetadata" -] = _IMAGECLASSIFICATIONMODELMETADATA -DESCRIPTOR.message_types_by_name[ - "ImageObjectDetectionModelMetadata" -] = _IMAGEOBJECTDETECTIONMODELMETADATA -DESCRIPTOR.message_types_by_name[ - "ImageClassificationModelDeploymentMetadata" -] = _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA -DESCRIPTOR.message_types_by_name[ - "ImageObjectDetectionModelDeploymentMetadata" -] = _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ImageClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "ImageClassificationDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Dataset metadata that is specific to image classification. - - Attributes: - classification_type: - Required. Type of the classification problem. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationDatasetMetadata) - }, -) -_sym_db.RegisterMessage(ImageClassificationDatasetMetadata) - -ImageObjectDetectionDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "ImageObjectDetectionDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMAGEOBJECTDETECTIONDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Dataset metadata specific to image object detection.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadata) - }, -) -_sym_db.RegisterMessage(ImageObjectDetectionDatasetMetadata) - -ImageClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( - "ImageClassificationModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Model metadata for image classification. - - Attributes: - base_model_id: - Optional. The ID of the ``base`` model. If it is specified, - the new model will be created based on the ``base`` model. - Otherwise, the new model will be created from scratch. The - ``base`` model must be in the same ``project`` and - ``location`` as the new model to create, and have the same - ``model_type``. - train_budget: - Required. The train budget of creating this model, expressed - in hours. The actual ``train_cost`` will be equal or less than - this value. - train_cost: - Output only. The actual train cost of creating this model, - expressed in hours. If this model is created from a ``base`` - model, the train cost used to create the ``base`` model are - not included. - stop_reason: - Output only. The reason that this create model operation - stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. - model_type: - Optional. Type of the model. The available values are: \* - ``cloud`` - Model to be used via prediction calls to AutoML - API. This is the default value. \* ``mobile-low-latency-1`` - - A model that, in addition to providing prediction via AutoML - API, can also be exported (see [AutoMl.ExportModel][google.clo - ud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or - edge device with TensorFlow afterwards. Expected to have low - latency, but may have lower prediction quality than other - models. 
\* ``mobile-versatile-1`` - A model that, in addition - to providing prediction via AutoML API, can also be exported - (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.E - xportModel]) and used on a mobile or edge device with - TensorFlow afterwards. \* ``mobile-high-accuracy-1`` - A model - that, in addition to providing prediction via AutoML API, can - also be exported (see [AutoMl.ExportModel][google.cloud.automl - .v1beta1.AutoMl.ExportModel]) and used on a mobile or edge - device with TensorFlow afterwards. Expected to have a higher - latency, but should also have a higher prediction quality than - other models. \* ``mobile-core-ml-low-latency-1`` - A model - that, in addition to providing prediction via AutoML API, can - also be exported (see [AutoMl.ExportModel][google.cloud.automl - .v1beta1.AutoMl.ExportModel]) and used on a mobile device with - Core ML afterwards. Expected to have low latency, but may have - lower prediction quality than other models. \* ``mobile-core- - ml-versatile-1`` - A model that, in addition to providing - prediction via AutoML API, can also be exported (see [AutoMl.E - xportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. \* - ``mobile-core-ml-high-accuracy-1`` - A model that, in addition - to providing prediction via AutoML API, can also be exported - (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.E - xportModel]) and used on a mobile device with Core ML - afterwards. Expected to have a higher latency, but should also - have a higher prediction quality than other models. - node_qps: - Output only. An approximate number of online prediction QPS - that can be supported by this model per each node on which it - is deployed. - node_count: - Output only. The number of nodes this model is deployed on. A - node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the node_qps field. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelMetadata) - }, -) -_sym_db.RegisterMessage(ImageClassificationModelMetadata) - -ImageObjectDetectionModelMetadata = _reflection.GeneratedProtocolMessageType( - "ImageObjectDetectionModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Model metadata specific to image object detection. - - Attributes: - model_type: - Optional. Type of the model. The available values are: \* - ``cloud-high-accuracy-1`` - (default) A model to be used via - prediction calls to AutoML API. Expected to have a higher - latency, but should also have a higher prediction quality than - other models. \* ``cloud-low-latency-1`` - A model to be used - via prediction calls to AutoML API. Expected to have low - latency, but may have lower prediction quality than other - models. \* ``mobile-low-latency-1`` - A model that, in - addition to providing prediction via AutoML API, can also be - exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1 - .AutoMl.ExportModel]) and used on a mobile or edge device with - TensorFlow afterwards. Expected to have low latency, but may - have lower prediction quality than other models. 
\* ``mobile-versatile-1`` - A model that, in addition to providing - prediction via AutoML API, can also be exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. \* ``mobile-high-accuracy-1`` - A model that, in - addition to providing prediction via AutoML API, can also be - exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device with - TensorFlow afterwards. Expected to have a higher latency, but - should also have a higher prediction quality than other - models. - node_count: - Output only. The number of nodes this model is deployed on. A - node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the qps_per_node field. - node_qps: - Output only. An approximate number of online prediction QPS - that can be supported by this model per each node on which it - is deployed. - stop_reason: - Output only. The reason that this create model operation - stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. - train_budget_milli_node_hours: - The train budget of creating this model, expressed in milli - node hours i.e. 1,000 value in this field means 1 node hour. - The actual ``train_cost`` will be equal or less than this - value. If further model training ceases to provide any - improvements, it will stop without using full budget and the - stop_reason will be ``MODEL_CONVERGED``. Note, node_hour = - actual_hour \* number_of_nodes_involved. For model type - ``cloud-high-accuracy-1``\ (default) and ``cloud-low-latency-1``, the train budget must be between 20,000 and - 900,000 milli node hours, inclusive. The default value is - 216,000 which represents one day in wall time. For model type - ``mobile-low-latency-1``, ``mobile-versatile-1``, ``mobile-high-accuracy-1``, ``mobile-core-ml-low-latency-1``, ``mobile-core-ml-versatile-1``, ``mobile-core-ml-high-accuracy-1``, the - train budget must be between 1,000 and 100,000 milli node - hours, inclusive. The default value is 24,000 which - represents one day in wall time. - train_cost_milli_node_hours: - Output only. The actual train cost of creating this model, - expressed in milli node hours, i.e. 1,000 value in this field - means 1 node hour. Guaranteed to not exceed the train budget. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata) - }, -) -_sym_db.RegisterMessage(ImageObjectDetectionModelMetadata) - -ImageClassificationModelDeploymentMetadata = _reflection.GeneratedProtocolMessageType( - "ImageClassificationModelDeploymentMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Model deployment metadata specific to Image Classification. - - Attributes: - node_count: - Input only. The number of nodes to deploy the model on. A node - is an abstraction of a machine resource, which can handle - online prediction QPS as given in the model’s - [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps]. - Must be between 1 and 100, inclusive on both ends.
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata) - }, -) -_sym_db.RegisterMessage(ImageClassificationModelDeploymentMetadata) - -ImageObjectDetectionModelDeploymentMetadata = _reflection.GeneratedProtocolMessageType( - "ImageObjectDetectionModelDeploymentMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Model deployment metadata specific to Image Object Detection. - - Attributes: - node_count: - Input only. The number of nodes to deploy the model on. A node - is an abstraction of a machine resource, which can handle - online prediction QPS as given in the model’s [qps_per_node][ - google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata. - qps_per_node]. Must be between 1 and 100, inclusive on both - ends. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata) - }, -) -_sym_db.RegisterMessage(ImageObjectDetectionModelDeploymentMetadata) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/image_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/image_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/image_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/io.proto b/google/cloud/automl_v1beta1/proto/io.proto deleted file mode 100644 index a9979383..00000000 --- a/google/cloud/automl_v1beta1/proto/io.proto +++ /dev/null @@ -1,1132 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Input configuration for ImportData Action. -// -// The format of input depends on dataset_metadata the Dataset into which -// the import is happening has. As input source the -// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] -// is expected, unless specified otherwise. Additionally any input .CSV file -// by itself must be 100MB or smaller, unless specified otherwise. -// If an "example" file (that is, image, video etc.) with identical content -// (even if it had different GCS_FILE_PATH) is mentioned multiple times, then -// its label, bounding boxes etc. are appended. The same file should be always -// provided with the same ML_USE and GCS_FILE_PATH, if it is not, then -// these values are nondeterministically selected from the given ones. 
-// -// The formats are represented in EBNF with commas being literal and with -// non-terminal symbols defined near the end of this comment. The formats are: -// -// * For Image Classification: -// CSV file(s) with each line in format: -// ML_USE,GCS_FILE_PATH,LABEL,LABEL,... -// GCS_FILE_PATH leads to image of up to 30MB in size. Supported -// extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO -// For MULTICLASS classification type, at most one LABEL is allowed -// per image. If an image has not yet been labeled, then it should be -// mentioned just once with no LABEL. -// Some sample rows: -// TRAIN,gs://folder/image1.jpg,daisy -// TEST,gs://folder/image2.jpg,dandelion,tulip,rose -// UNASSIGNED,gs://folder/image3.jpg,daisy -// UNASSIGNED,gs://folder/image4.jpg -// -// * For Image Object Detection: -// CSV file(s) with each line in format: -// ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,) -// GCS_FILE_PATH leads to image of up to 30MB in size. Supported -// extensions: .JPEG, .GIF, .PNG. -// Each image is assumed to be exhaustively labeled. The minimum -// allowed BOUNDING_BOX edge length is 0.01, and no more than 500 -// BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is defined -// per line). If an image has not yet been labeled, then it should be -// mentioned just once with no LABEL and the ",,,,,,," in place of the -// BOUNDING_BOX. For images which are known to not contain any -// bounding boxes, they should be labeled explicitly as -// "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the -// BOUNDING_BOX. -// Sample rows: -// TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, -// TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, -// UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 -// TEST,gs://folder/im3.png,,,,,,,,, -// TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, -// -// * For Video Classification: -// CSV file(s) with each line in format: -// ML_USE,GCS_FILE_PATH -// where ML_USE VALIDATE value should not be used. The GCS_FILE_PATH -// should lead to another .csv file which describes examples that have -// given ML_USE, using the following row format: -// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) -// Here GCS_FILE_PATH leads to a video of up to 50GB in size and up -// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. -// TIME_SEGMENT_START and TIME_SEGMENT_END must be within the -// length of the video, and end has to be after the start. Any segment -// of a video which has one or more labels on it, is considered a -// hard negative for all other labels. Any segment with no labels on -// it is considered to be unknown. If a whole video is unknown, then -// it should be mentioned just once with ",," in place of LABEL, -// TIME_SEGMENT_START,TIME_SEGMENT_END. -// Sample top level CSV file: -// TRAIN,gs://folder/train_videos.csv -// TEST,gs://folder/test_videos.csv -// UNASSIGNED,gs://folder/other_videos.csv -// Sample rows of a CSV file for a particular ML_USE: -// gs://folder/video1.avi,car,120,180.000021 -// gs://folder/video1.avi,bike,150,180.000021 -// gs://folder/vid2.avi,car,0,60.5 -// gs://folder/vid3.avi,,, -// -// * For Video Object Tracking: -// CSV file(s) with each line in format: -// ML_USE,GCS_FILE_PATH -// where ML_USE VALIDATE value should not be used.
The GCS_FILE_PATH -// should lead to another .csv file which describes examples that have -// given ML_USE, using one of the following row format: -// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX -// or -// GCS_FILE_PATH,,,,,,,,,, -// Here GCS_FILE_PATH leads to a video of up to 50GB in size and up -// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. -// Providing INSTANCE_IDs can help to obtain a better model. When -// a specific labeled entity leaves the video frame, and shows up -// afterwards it is not required, albeit preferable, that the same -// INSTANCE_ID is given to it. -// TIMESTAMP must be within the length of the video, the -// BOUNDING_BOX is assumed to be drawn on the closest video's frame -// to the TIMESTAMP. Any mentioned by the TIMESTAMP frame is expected -// to be exhaustively labeled and no more than 500 BOUNDING_BOX-es per -// frame are allowed. If a whole video is unknown, then it should be -// mentioned just once with ",,,,,,,,,," in place of LABEL, -// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. -// Sample top level CSV file: -// TRAIN,gs://folder/train_videos.csv -// TEST,gs://folder/test_videos.csv -// UNASSIGNED,gs://folder/other_videos.csv -// Seven sample rows of a CSV file for a particular ML_USE: -// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 -// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 -// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 -// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, -// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, -// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, -// gs://folder/video2.avi,,,,,,,,,,, -// * For Text Extraction: -// CSV file(s) with each line in format: -// ML_USE,GCS_FILE_PATH -// GCS_FILE_PATH leads to a .JSONL (that is, JSON Lines) file which -// either imports text in-line or as documents. Any given -// .JSONL file must be 100MB or smaller. -// The in-line .JSONL file contains, per line, a proto that wraps a -// TextSnippet proto (in json representation) followed by one or more -// AnnotationPayload protos (called annotations), which have -// display_name and text_extraction detail populated. The given text -// is expected to be annotated exhaustively, for example, if you look -// for animals and text contains "dolphin" that is not labeled, then -// "dolphin" is assumed to not be an animal. Any given text snippet -// content must be 10KB or smaller, and also be UTF-8 NFC encoded -// (ASCII already is). -// The document .JSONL file contains, per line, a proto that wraps a -// Document proto. The Document proto must have either document_text -// or input_config set. In document_text case, the Document proto may -// also contain the spatial information of the document, including -// layout, document dimension and page number. In input_config case, -// only PDF documents are supported now, and each document may be up -// to 2MB large. Currently, annotations on documents cannot be -// specified at import. 
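The ImportData formats described in this comment are driven entirely by the CSV (or JSONL) that the `gcs_source` points at. As a rough sketch of the call site under the 2.x keyword-argument convention, where the project, location, dataset ID, and CSV path are all placeholders:

```py
# Sketch only: "my-project", "us-central1", "my-dataset-id" and the CSV
# path are placeholders; import.csv must follow one of the layouts above.
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")

operation = client.import_data(
    name=dataset_name,
    input_config={"gcs_source": {"input_uris": ["gs://folder/import.csv"]}},
)
operation.result()  # ImportData is long-running; block until it completes.
```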
-// Three sample CSV rows: -// TRAIN,gs://folder/file1.jsonl -// VALIDATE,gs://folder/file2.jsonl -// TEST,gs://folder/file3.jsonl -// Sample in-line JSON Lines file for entity extraction (presented here -// with artificial line breaks, but the only actual line break is -// denoted by \n).: -// { -// "document": { -// "document_text": {"content": "dog cat"} -// "layout": [ -// { -// "text_segment": { -// "start_offset": 0, -// "end_offset": 3, -// }, -// "page_number": 1, -// "bounding_poly": { -// "normalized_vertices": [ -// {"x": 0.1, "y": 0.1}, -// {"x": 0.1, "y": 0.3}, -// {"x": 0.3, "y": 0.3}, -// {"x": 0.3, "y": 0.1}, -// ], -// }, -// "text_segment_type": TOKEN, -// }, -// { -// "text_segment": { -// "start_offset": 4, -// "end_offset": 7, -// }, -// "page_number": 1, -// "bounding_poly": { -// "normalized_vertices": [ -// {"x": 0.4, "y": 0.1}, -// {"x": 0.4, "y": 0.3}, -// {"x": 0.8, "y": 0.3}, -// {"x": 0.8, "y": 0.1}, -// ], -// }, -// "text_segment_type": TOKEN, -// } -// -// ], -// "document_dimensions": { -// "width": 8.27, -// "height": 11.69, -// "unit": INCH, -// } -// "page_count": 1, -// }, -// "annotations": [ -// { -// "display_name": "animal", -// "text_extraction": {"text_segment": {"start_offset": 0, -// "end_offset": 3}} -// }, -// { -// "display_name": "animal", -// "text_extraction": {"text_segment": {"start_offset": 4, -// "end_offset": 7}} -// } -// ], -// }\n -// { -// "text_snippet": { -// "content": "This dog is good." -// }, -// "annotations": [ -// { -// "display_name": "animal", -// "text_extraction": { -// "text_segment": {"start_offset": 5, "end_offset": 8} -// } -// } -// ] -// } -// Sample document JSON Lines file (presented here with artificial line -// breaks, but the only actual line break is denoted by \n).: -// { -// "document": { -// "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] -// } -// } -// } -// }\n -// { -// "document": { -// "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] -// } -// } -// } -// } -// -// * For Text Classification: -// CSV file(s) with each line in format: -// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... -// TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If -// the column content is a valid gcs file path, i.e. prefixed by -// "gs://", it will be treated as a GCS_FILE_PATH, else if the content -// is enclosed within double quotes (""), it is -// treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path -// must lead to a .txt file with UTF-8 encoding, for example, -// "gs://folder/content.txt", and the content in it is extracted -// as a text snippet. In TEXT_SNIPPET case, the column content -// excluding quotes is treated as to be imported text snippet. In -// both cases, the text snippet/file size must be within 128kB. -// Maximum 100 unique labels are allowed per CSV row. -// Sample rows: -// TRAIN,"They have bad food and very rude",RudeService,BadFood -// TRAIN,gs://folder/content.txt,SlowService -// TEST,"Typically always bad service there.",RudeService -// VALIDATE,"Stomach ache to go.",BadFood -// -// * For Text Sentiment: -// CSV file(s) with each line in format: -// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT -// TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If -// the column content is a valid gcs file path, that is, prefixed by -// "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated -// as a TEXT_SNIPPET. 
In the GCS_FILE_PATH case, the path -// must lead to a .txt file with UTF-8 encoding, for example, -// "gs://folder/content.txt", and the content in it is extracted -// as a text snippet. In TEXT_SNIPPET case, the column content itself -// is treated as the text snippet to import. In both cases, the -// text snippet must be up to 500 characters long. -// Sample rows: -// TRAIN,"@freewrytin this is way too good for your product",2 -// TRAIN,"I need this product so bad",3 -// TEST,"Thank you for this product.",4 -// VALIDATE,gs://folder/content.txt,2 -// -// * For Tables: -// Either -// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or -// -// [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] -// can be used. All inputs are concatenated into a single -// -// [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] -// For gcs_source: -// CSV file(s), where the first row of the first file is the header, -// containing unique column names. If the first row of a subsequent -// file is the same as the header, then it is also treated as a -// header. All other rows contain values for the corresponding -// columns. -// Each .CSV file by itself must be 10GB or smaller, and their total -// size must be 100GB or smaller. -// First three sample rows of a CSV file: -// "Id","First Name","Last Name","Dob","Addresses" -// -// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" -// -// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]" -// For bigquery_source: -// A URI of a BigQuery table. The user data size of the BigQuery -// table must be 100GB or smaller. -// An imported table must have between 2 and 1,000 columns, inclusive, -// and between 1000 and 100,000,000 rows, inclusive. At most 5 -// import data operations can run in parallel. -// Definitions: -// ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED" -// Describes how the given example (file) should be used for model -// training. "UNASSIGNED" can be used when user has no preference. -// GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/image1.png". -// LABEL = A display name of an object on an image, video etc., e.g. "dog". -// Must be up to 32 characters long and can consist only of ASCII -// Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9. -// For each label an AnnotationSpec is created which display_name -// becomes the label; AnnotationSpecs are given back in predictions. -// INSTANCE_ID = A positive integer that identifies a specific instance of a -// labeled entity on an example. Used e.g. to track two cars on -// a video while being able to tell apart which one is which. -// BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,, -// A rectangle parallel to the frame of the example (image, -// video). If 4 vertices are given they are connected by edges -// in the order provided, if 2 are given they are recognized -// as diagonally opposite vertices of the rectangle. -// VERTEX = COORDINATE,COORDINATE -// First coordinate is horizontal (x), the second is vertical (y).
-// COORDINATE = A float in 0 to 1 range, relative to total length of -// image or video in given dimension. For fractions the -// leading non-decimal 0 can be omitted (i.e. 0.3 = .3). -// Point 0,0 is in top left. -// TIME_SEGMENT_START = TIME_OFFSET -// Expresses a beginning, inclusive, of a time segment -// within an example that has a time dimension -// (e.g. video). -// TIME_SEGMENT_END = TIME_OFFSET -// Expresses an end, exclusive, of a time segment within -// an example that has a time dimension (e.g. video). -// TIME_OFFSET = A number of seconds as measured from the start of an -// example (e.g. video). Fractions are allowed, up to a -// microsecond precision. "inf" is allowed, and it means the end -// of the example. -// TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed within -// double quotes (""). -// SENTIMENT = An integer between 0 and -// Dataset.text_sentiment_dataset_metadata.sentiment_max -// (inclusive). Describes the ordinal of the sentiment - higher -// value means a more positive sentiment. All the values are -// completely relative, i.e. neither 0 needs to mean a negative or -// neutral sentiment nor sentiment_max needs to mean a positive one -// - it is just required that 0 is the least positive sentiment -// in the data, and sentiment_max is the most positive one. -// The SENTIMENT shouldn't be confused with "score" or "magnitude" -// from the previous Natural Language Sentiment Analysis API. -// All SENTIMENT values between 0 and sentiment_max must be -// represented in the imported data. On prediction the same 0 to -// sentiment_max range will be used. The difference between -// neighboring sentiment values need not be uniform, e.g. 1 and -// 2 may be similar whereas the difference between 2 and 3 may be -// huge. -// -// Errors: -// If any of the provided CSV files can't be parsed or if more than a certain -// percent of CSV rows cannot be processed then the operation fails and -// nothing is imported. Regardless of overall success or failure the per-row -// failures, up to a certain count cap, are listed in -// Operation.metadata.partial_failures. -// -message InputConfig { - // The source of the input. - oneof source { - // The Google Cloud Storage location for the input content. - // In ImportData, the gcs_source points to a csv with structure described in - // the comment. - GcsSource gcs_source = 1; - - // The BigQuery location for the input content. - BigQuerySource bigquery_source = 3; - } - - // Additional domain-specific parameters describing the semantic of the - // imported data, any string must be up to 25000 - // characters long. - // - // * For Tables: - // `schema_inference_version` - (integer) Required. The version of the - // algorithm that should be used for the initial inference of the - // schema (columns' DataTypes) of the table the data is being imported - // into. Allowed values: "1". - map<string, string> params = 2; -} - -// Input configuration for BatchPredict Action. -// -// The format of input depends on the ML problem of the model used for -// prediction. As input source the -// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] -// is expected, unless specified otherwise. -// -// The formats are represented in EBNF with commas being literal and with -// non-terminal symbols defined near the end of this comment. The formats -// are: -// -// * For Image Classification: -// CSV file(s) with each line having just a single column: -// GCS_FILE_PATH -// which leads to image of up to 30MB in size.
Supported -// extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in -// the Batch predict output. -// Three sample rows: -// gs://folder/image1.jpeg -// gs://folder/image2.gif -// gs://folder/image3.png -// -// * For Image Object Detection: -// CSV file(s) with each line having just a single column: -// GCS_FILE_PATH -// which leads to image of up to 30MB in size. Supported -// extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in -// the Batch predict output. -// Three sample rows: -// gs://folder/image1.jpeg -// gs://folder/image2.gif -// gs://folder/image3.png -// * For Video Classification: -// CSV file(s) with each line in format: -// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END -// GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h -// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. -// TIME_SEGMENT_START and TIME_SEGMENT_END must be within the -// length of the video, and end has to be after the start. -// Three sample rows: -// gs://folder/video1.mp4,10,40 -// gs://folder/video1.mp4,20,60 -// gs://folder/vid2.mov,0,inf -// -// * For Video Object Tracking: -// CSV file(s) with each line in format: -// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END -// GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h -// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. -// TIME_SEGMENT_START and TIME_SEGMENT_END must be within the -// length of the video, and end has to be after the start. -// Three sample rows: -// gs://folder/video1.mp4,10,240 -// gs://folder/video1.mp4,300,360 -// gs://folder/vid2.mov,0,inf -// * For Text Classification: -// CSV file(s) with each line having just a single column: -// GCS_FILE_PATH | TEXT_SNIPPET -// Any given text file can have size up to 128kB. -// Any given text snippet content must have 60,000 characters or less. -// Three sample rows: -// gs://folder/text1.txt -// "Some text content to predict" -// gs://folder/text3.pdf -// Supported file extensions: .txt, .pdf -// -// * For Text Sentiment: -// CSV file(s) with each line having just a single column: -// GCS_FILE_PATH | TEXT_SNIPPET -// Any given text file can have size up to 128kB. -// Any given text snippet content must have 500 characters or less. -// Three sample rows: -// gs://folder/text1.txt -// "Some text content to predict" -// gs://folder/text3.pdf -// Supported file extensions: .txt, .pdf -// -// * For Text Extraction -// .JSONL (i.e. JSON Lines) file(s) which either provide text in-line or -// as documents (for a single BatchPredict call only one of these -// formats may be used). -// The in-line .JSONL file(s) contain per line a proto that -// wraps a temporary user-assigned TextSnippet ID (string up to 2000 -// characters long) called "id", a TextSnippet proto (in -// json representation) and zero or more TextFeature protos. Any given -// text snippet content must have 30,000 characters or less, and also -// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be -// unique. -// The document .JSONL file(s) contain, per line, a proto that wraps a -// Document proto with input_config set. Only PDF documents are -// supported now, and each document must be up to 2MB large. -// Any given .JSONL file must be 100MB or smaller, and no more than 20 -// files may be given.
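For the batch-prediction inputs above, the corresponding call pairs a `BatchPredictInputConfig` with a `BatchPredictOutputConfig` (defined later in this file). A hedged sketch using the flattened keyword form, where the model ID and GCS URIs are placeholders:

```py
# Sketch only: the model ID and GCS paths are placeholders; batch.csv must
# follow the single-column / CSV layouts described in this comment.
from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()
model_name = client.model_path("my-project", "us-central1", "my-model-id")

operation = client.batch_predict(
    name=model_name,
    input_config={"gcs_source": {"input_uris": ["gs://folder/batch.csv"]}},
    output_config={"gcs_destination": {"output_uri_prefix": "gs://folder/out/"}},
    params={},
)
print(operation.result())  # BatchPredictResult once the operation finishes.
```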
-// Sample in-line JSON Lines file (presented here with artificial line -// breaks, but the only actual line break is denoted by \n): -// { -// "id": "my_first_id", -// "text_snippet": { "content": "dog car cat"}, -// "text_features": [ -// { -// "text_segment": {"start_offset": 4, "end_offset": 6}, -// "structural_type": PARAGRAPH, -// "bounding_poly": { -// "normalized_vertices": [ -// {"x": 0.1, "y": 0.1}, -// {"x": 0.1, "y": 0.3}, -// {"x": 0.3, "y": 0.3}, -// {"x": 0.3, "y": 0.1}, -// ] -// }, -// } -// ], -// }\n -// { -// "id": "2", -// "text_snippet": { -// "content": "An elaborate content", -// "mime_type": "text/plain" -// } -// } -// Sample document JSON Lines file (presented here with artificial line -// breaks, but the only actual line break is denoted by \n).: -// { -// "document": { -// "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] -// } -// } -// } -// }\n -// { -// "document": { -// "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] -// } -// } -// } -// } -// -// * For Tables: -// Either -// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or -// -// [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. -// GCS case: -// CSV file(s), each by itself 10GB or smaller and total size must be -// 100GB or smaller, where first file must have a header containing -// column names. If the first row of a subsequent file is the same as -// the header, then it is also treated as a header. All other rows -// contain values for the corresponding columns. -// The column names must contain the model's -// -// [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] -// -// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] -// (order doesn't matter). The columns corresponding to the model's -// input feature column specs must contain values compatible with the -// column spec's data types. Prediction on all the rows, i.e. the CSV -// lines, will be attempted. For FORECASTING -// -// [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: -// all columns having -// -// [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] -// type will be ignored. -// First three sample rows of a CSV file: -// "First Name","Last Name","Dob","Addresses" -// -// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" -// -// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} -// BigQuery case: -// An URI of a BigQuery table. The user data size of the BigQuery -// table must be 100GB or smaller. -// The column names must contain the model's -// -// [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] -// -// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] -// (order doesn't matter). The columns corresponding to the model's -// input feature column specs must contain values compatible with the -// column spec's data types. Prediction on all the rows of the table -// will be attempted. 
For FORECASTING -// -// [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: -// all columns having -// -// [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] -// type will be ignored. -// -// Definitions: -// GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/video.avi". -// TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed within -// double quotes ("") -// TIME_SEGMENT_START = TIME_OFFSET -// Expresses a beginning, inclusive, of a time segment -// within an -// example that has a time dimension (e.g. video). -// TIME_SEGMENT_END = TIME_OFFSET -// Expresses an end, exclusive, of a time segment within -// an example that has a time dimension (e.g. video). -// TIME_OFFSET = A number of seconds as measured from the start of an -// example (e.g. video). Fractions are allowed, up to a -// microsecond precision. "inf" is allowed and it means the end -// of the example. -// -// Errors: -// If any of the provided CSV files can't be parsed or if more than a certain -// percent of CSV rows cannot be processed then the operation fails and -// prediction does not happen. Regardless of overall success or failure the -// per-row failures, up to a certain count cap, will be listed in -// Operation.metadata.partial_failures. -message BatchPredictInputConfig { - // Required. The source of the input. - oneof source { - // The Google Cloud Storage location for the input content. - GcsSource gcs_source = 1; - - // The BigQuery location for the input content. - BigQuerySource bigquery_source = 2; - } -} - -// Input configuration of a [Document][google.cloud.automl.v1beta1.Document]. -message DocumentInputConfig { - // The Google Cloud Storage location of the document file. Only a single path - // should be given. - // Max supported size: 512MB. - // Supported extensions: .PDF. - GcsSource gcs_source = 1; -} - -// * For Translation: -// CSV file `translation.csv`, with each line in format: -// ML_USE,GCS_FILE_PATH -// GCS_FILE_PATH leads to a .TSV file which describes examples that have -// given ML_USE, using the following row format per line: -// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target -// language) -// -// * For Tables: -// Output depends on whether the dataset was imported from GCS or -// BigQuery. -// GCS case: -// -// [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] -// must be set. Exported are CSV file(s) `tables_1.csv`, -// `tables_2.csv`,...,`tables_N.csv` with each having as header line -// the table's column names, and all other lines contain values for -// the header columns. -// BigQuery case: -// -// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] -// pointing to a BigQuery project must be set. In the given project a -// new dataset will be created with name -// -// `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>` -// where <automl-dataset-display-name> will be made -// BigQuery-dataset-name compatible (e.g. most special characters will -// become underscores), and timestamp will be in -// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that -// dataset a new table called `primary_table` will be created, and -// filled with precisely the same data as this obtained on import. -message OutputConfig { - // Required. The destination of the output. - oneof destination { - // The Google Cloud Storage location where the output is to be written to.
- // For Image Object Detection, Text Extraction, Video Classification and - // Tables, in the given directory a new directory will be created with name: - // export_data-<automl-dataset-display-name>-<timestamp-of-export-call> where - // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export - // output will be written into that directory. - GcsDestination gcs_destination = 1; - - // The BigQuery location where the output is to be written to. - BigQueryDestination bigquery_destination = 2; - } -} - -// Output configuration for BatchPredict Action. -// -// As destination the -// -// [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] -// must be set unless specified otherwise for a domain. If gcs_destination is -// set then in the given directory a new directory is created. Its name -// will be -// "prediction-<model-display-name>-<timestamp-of-prediction-call>", -// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents -// of it depends on the ML problem the predictions are made for. -// -// * For Image Classification: -// In the created directory files `image_classification_1.jsonl`, -// `image_classification_2.jsonl`,...,`image_classification_N.jsonl` -// will be created, where N may be 1, and depends on the -// total number of the successfully predicted images and annotations. -// A single image will be listed only once with all its annotations, -// and its annotations will never be split across files. -// Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps image's "ID" : "<id_value>" followed by a list of -// zero or more AnnotationPayload protos (called annotations), which -// have classification detail populated. -// If prediction for any image failed (partially or completely), then an -// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` -// files will be created (N depends on total number of failed -// predictions). These files will have a JSON representation of a proto -// that wraps the same "ID" : "<id_value>" but here followed by -// exactly one -// -// [`google.rpc.Status`](https: -// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) -// containing only `code` and `message` fields. -// -// * For Image Object Detection: -// In the created directory files `image_object_detection_1.jsonl`, -// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl` -// will be created, where N may be 1, and depends on the -// total number of the successfully predicted images and annotations. -// Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps image's "ID" : "<id_value>" followed by a list of -// zero or more AnnotationPayload protos (called annotations), which -// have image_object_detection detail populated. A single image will -// be listed only once with all its annotations, and its annotations -// will never be split across files. -// If prediction for any image failed (partially or completely), then -// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` -// files will be created (N depends on total number of failed -// predictions). These files will have a JSON representation of a proto -// that wraps the same "ID" : "<id_value>" but here followed by -// exactly one -// -// [`google.rpc.Status`](https: -// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) -// containing only `code` and `message` fields. -// * For Video Classification: -// In the created directory a video_classification.csv file, and a .JSON -// file per each video classification requested in the input (i.e.
each -// line in given CSV(s)), will be created. -// -// The format of video_classification.csv is: -// -// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS -// where: -// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 -// the prediction input lines (i.e. video_classification.csv has -// precisely the same number of lines as the prediction input had.) -// JSON_FILE_NAME = Name of .JSON file in the output directory, which -// contains prediction responses for the video time segment. -// STATUS = "OK" if prediction completed successfully, or an error code -// with message otherwise. If STATUS is not "OK" then the .JSON file -// for that line may not exist or be empty. -// -// Each .JSON file, assuming STATUS is "OK", will contain a list of -// AnnotationPayload protos in JSON format, which are the predictions -// for the video time segment the file is assigned to in the -// video_classification.csv. All AnnotationPayload protos will have -// video_classification field set, and will be sorted by -// video_classification.type field (note that the returned types are -// governed by `classification_types` parameter in -// [PredictService.BatchPredictRequest.params][]). -// -// * For Video Object Tracking: -// In the created directory a video_object_tracking.csv file will be -// created, and multiple files video_object_tracking_1.json, -// video_object_tracking_2.json,..., video_object_tracking_N.json, -// where N is the number of requests in the input (i.e. the number of -// lines in given CSV(s)). -// -// The format of video_object_tracking.csv is: -// -// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS -// where: -// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 -// the prediction input lines (i.e. video_object_tracking.csv has -// precisely the same number of lines as the prediction input had.) -// JSON_FILE_NAME = Name of .JSON file in the output directory, which -// contains prediction responses for the video time segment. -// STATUS = "OK" if prediction completed successfully, or an error -// code with message otherwise. If STATUS is not "OK" then the .JSON -// file for that line may not exist or be empty. -// -// Each .JSON file, assuming STATUS is "OK", will contain a list of -// AnnotationPayload protos in JSON format, which are the predictions -// for each frame of the video time segment the file is assigned to in -// video_object_tracking.csv. All AnnotationPayload protos will have -// video_object_tracking field set. -// * For Text Classification: -// In the created directory files `text_classification_1.jsonl`, -// `text_classification_2.jsonl`,...,`text_classification_N.jsonl` -// will be created, where N may be 1, and depends on the -// total number of inputs and annotations found. -// -// Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps input text snippet or input text file and a list of -// zero or more AnnotationPayload protos (called annotations), which -// have classification detail populated. A single text snippet or file -// will be listed only once with all its annotations, and its -// annotations will never be split across files. -// -// If prediction for any text snippet or file failed (partially or -// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., -// `errors_N.jsonl` files will be created (N depends on total number of -// failed predictions).
These files will have a JSON representation of a -// proto that wraps input text snippet or input text file followed by -// exactly one -// -// [`google.rpc.Status`](https: -// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) -// containing only `code` and `message`. -// -// * For Text Sentiment: -// In the created directory files `text_sentiment_1.jsonl`, -// `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl` -// will be created, where N may be 1, and depends on the -// total number of inputs and annotations found. -// -// Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps input text snippet or input text file and a list of -// zero or more AnnotationPayload protos (called annotations), which -// have text_sentiment detail populated. A single text snippet or file -// will be listed only once with all its annotations, and its -// annotations will never be split across files. -// -// If prediction for any text snippet or file failed (partially or -// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., -// `errors_N.jsonl` files will be created (N depends on total number of -// failed predictions). These files will have a JSON representation of a -// proto that wraps input text snippet or input text file followed by -// exactly one -// -// [`google.rpc.Status`](https: -// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) -// containing only `code` and `message`. -// -// * For Text Extraction: -// In the created directory files `text_extraction_1.jsonl`, -// `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl` -// will be created, where N may be 1, and depends on the -// total number of inputs and annotations found. -// The contents of these .JSONL file(s) depend on whether the input -// used inline text, or documents. -// If input was inline, then each .JSONL file will contain, per line, -// a JSON representation of a proto that wraps given in request text -// snippet's "id" (if specified), followed by input text snippet, -// and a list of zero or more -// AnnotationPayload protos (called annotations), which have -// text_extraction detail populated. A single text snippet will be -// listed only once with all its annotations, and its annotations will -// never be split across files. -// If input used documents, then each .JSONL file will contain, per -// line, a JSON representation of a proto that wraps given in request -// document proto, followed by its OCR-ed representation in the form -// of a text snippet, finally followed by a list of zero or more -// AnnotationPayload protos (called annotations), which have -// text_extraction detail populated and refer, via their indices, to -// the OCR-ed text snippet. A single document (and its text snippet) -// will be listed only once with all its annotations, and its -// annotations will never be split across files. -// If prediction for any text snippet failed (partially or completely), -// then additional `errors_1.jsonl`, `errors_2.jsonl`,..., -// `errors_N.jsonl` files will be created (N depends on total number of -// failed predictions). These files will have a JSON representation of a -// proto that wraps either the "id" : "" (in case of inline) -// or the document proto (in case of document) but here followed by -// exactly one -// -// [`google.rpc.Status`](https: -// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) -// containing only `code` and `message`. 
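Since every text domain above writes `*_N.jsonl` payload files plus optional `errors_N.jsonl` files, consuming the output amounts to walking the prediction directory. A sketch, assuming `google-cloud-storage` is installed and that a batch prediction wrote into the hypothetical `gs://folder/out/` prefix used earlier:

```py
# Sketch only: "folder" (bucket) and "out/" (prefix) are placeholders for
# wherever BatchPredictOutputConfig.gcs_destination pointed.
import json

from google.cloud import storage

client = storage.Client()
for blob in client.list_blobs("folder", prefix="out/"):
    # Skip the error shards and anything that is not a JSON Lines file.
    if "errors" in blob.name or not blob.name.endswith(".jsonl"):
        continue
    for line in blob.download_as_bytes().decode("utf-8").splitlines():
        prediction = json.loads(line)  # one wrapped payload per line
        print(prediction)
```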
-// -// * For Tables: -// Output depends on whether -// -// [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] -// or -// -// [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] -// is set (either is allowed). -// GCS case: -// In the created directory files `tables_1.csv`, `tables_2.csv`,..., -// `tables_N.csv` will be created, where N may be 1, and depends on -// the total number of the successfully predicted rows. -// For all CLASSIFICATION -// -// [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: -// Each .csv file will contain a header, listing all columns' -// -// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] -// given on input followed by M target column names in the format of -// -// "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] -// -// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>_<target value>_score" where M is the number of distinct target values, -// i.e. number of distinct values in the target column of the table -// used to train the model. Subsequent lines will contain the -// respective values of successfully predicted rows, with the last, -// i.e. the target, columns having the corresponding prediction -// [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. -// For REGRESSION and FORECASTING -// -// [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: -// Each .csv file will contain a header, listing all columns' -// [display_name-s][google.cloud.automl.v1beta1.display_name] given -// on input followed by the predicted target column with name in the -// format of -// -// "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] -// -// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" -// Subsequent lines will contain the respective values of -// successfully predicted rows, with the last, i.e. the target, -// column having the predicted target value. -// If prediction for any rows failed, then an additional -// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be -// created (N depends on total number of failed rows). These files -// will have analogous format as `tables_*.csv`, but always with a -// single target column having -// -// [`google.rpc.Status`](https: -// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) -// represented as a JSON string, and containing only `code` and -// `message`. -// BigQuery case: -// -// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] -// pointing to a BigQuery project must be set. In the given project a -// new dataset will be created with name -// `prediction_<model-display-name>_<timestamp-of-prediction-call>` -// where <model-display-name> will be made -// BigQuery-dataset-name compatible (e.g. most special characters will -// become underscores), and timestamp will be in -// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset -// two tables will be created, `predictions`, and `errors`.
-// The `predictions` table's column names will be the input columns'
-//
-// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]
-// followed by the target column with name in the format of
-//
-// "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>"
-// The input feature columns will contain the respective values of
-// successfully predicted rows, with the target column having an
-// ARRAY of
-//
-// [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
-// represented as STRUCT-s, containing
-// [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
-// The `errors` table contains rows for which the prediction has
-// failed; it has analogous input columns, while the target column name
-// is in the format of
-//
-// "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-//
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>",
-// and as a value has
-//
-// [`google.rpc.Status`](https:
-// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
-// represented as a STRUCT, and containing only `code` and `message`.
-message BatchPredictOutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The Google Cloud Storage location of the directory where the output is to
- // be written to.
- GcsDestination gcs_destination = 1;
-
- // The BigQuery location where the output is to be written to.
- BigQueryDestination bigquery_destination = 2;
- }
-}
-
-// Output configuration for ModelExport Action.
-message ModelExportOutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The Google Cloud Storage location where the model is to be written to.
- // This location may only be set for the following model formats:
- // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
- //
- // Under the directory given as the destination a new one with name
- // "model-export-<model-display-name>-<timestamp-of-export-call>",
- // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
- // will be created. Inside it, the model and any of its supporting files
- // will be written.
- GcsDestination gcs_destination = 1;
-
- // The GCR location where model image is to be pushed to. This location
- // may only be set for the following model formats:
- // "docker".
- //
- // The model image will be created under the given URI.
- GcrDestination gcr_destination = 3;
- }
-
- // The format in which the model must be exported. The available, and default,
- // formats depend on the problem and model type (if given problem and type
- // combination doesn't have a format listed, it means its models are not
- // exportable):
- //
- // * For Image Classification mobile-low-latency-1, mobile-versatile-1,
- // mobile-high-accuracy-1:
- // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
- // "docker".
- //
- // * For Image Classification mobile-core-ml-low-latency-1,
- // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
- // "core_ml" (default).
- // Formats description:
- //
- // * tflite - Used for Android mobile devices.
- // * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
- // devices.
- // * tf_saved_model - A tensorflow model in SavedModel format.
- // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
- // be used in the browser and in Node.js using JavaScript.
- // * docker - Used for Docker containers. Use the params field to customize
- // the container. The container is verified to work correctly on
- // ubuntu 16.04 operating system. See more at
- // [containers
- //
- // quickstart](https:
- // //cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
- // * core_ml - Used for iOS mobile devices.
- string model_format = 4;
-
- // Additional model-type and format specific parameters describing the
- // requirements for the model files to be exported; any string must be up to
- // 25000 characters long.
- //
- // * For `docker` format:
- // `cpu_architecture` - (string) "x86_64" (default).
- // `gpu_architecture` - (string) "none" (default), "nvidia".
- map<string, string> params = 2;
-}
-
-// Output configuration for ExportEvaluatedExamples Action. Note that this call
-// is available only for 30 days since the moment the model was evaluated.
-// The output depends on the domain, as follows (note that only examples from
-// the TEST set are exported):
-//
-// * For Tables:
-//
-// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
-// pointing to a BigQuery project must be set. In the given project a
-// new dataset will be created with name
-//
-// `export_evaluated_examples_<model-display-name>_<timestamp-of-export-call>`
-// where <model-display-name> will be made BigQuery-dataset-name
-// compatible (e.g. most special characters will become underscores),
-// and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
-// format. In the dataset an `evaluated_examples` table will be
-// created. It will have all the same columns as the
-//
-// [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]
-// of the
-// [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which
-// the model was created, as they were at the moment of model's
-// evaluation (this includes the target column with its ground
-// truth), followed by a column called
-// "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
-// [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>". That
-// last column will contain the model's prediction result for each
-// respective row, given as ARRAY of
-// [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
-// represented as STRUCT-s, containing
-// [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
-message ExportEvaluatedExamplesOutputConfig {
- // Required. The destination of the output.
- oneof destination {
- // The BigQuery location where the output is to be written to.
- BigQueryDestination bigquery_destination = 2;
- }
-}
-
-// The Google Cloud Storage location for the input content.
-message GcsSource {
- // Required. Google Cloud Storage URIs to input files, up to 2000 characters
- // long. Accepted forms:
- // * Full object path, e.g. gs://bucket/directory/object.csv
- repeated string input_uris = 1;
-}
-
-// The BigQuery location for the input content.
-message BigQuerySource {
- // Required. BigQuery URI to a table, up to 2000 characters long.
- // Accepted forms:
- // * BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
- string input_uri = 1;
-}
-
-// The Google Cloud Storage location where the output is to be written to.
-message GcsDestination {
- // Required. Google Cloud Storage URI to output directory, up to 2000
- // characters long.
- // Accepted forms:
- // * Prefix path: gs://bucket/directory
- // The requesting user must have write permission to the bucket.
- // The directory is created if it doesn't exist.
- string output_uri_prefix = 1;
-}
-
-// The BigQuery location for the output content.
-message BigQueryDestination {
- // Required.
BigQuery URI to a project, up to 2000 characters long. - // Accepted forms: - // * BigQuery path e.g. bq://projectId - string output_uri = 1; -} - -// The GCR location where the image must be pushed to. -message GcrDestination { - // Required. Google Contained Registry URI of the new image, up to 2000 - // characters long. See - // - // https: - // //cloud.google.com/container-registry/do - // // cs/pushing-and-pulling#pushing_an_image_to_a_registry - // Accepted forms: - // * [HOSTNAME]/[PROJECT-ID]/[IMAGE] - // * [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG] - // - // The requesting user must have permission to push images the project. - string output_uri = 1; -} diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py deleted file mode 100644 index 13bd8be1..00000000 --- a/google/cloud/automl_v1beta1/proto/io_pb2.py +++ /dev/null @@ -1,1873 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/io.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/io.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n*google/cloud/automl_v1beta1/proto/io.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x92\x02\n\x0bInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x12\x44\n\x06params\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.InputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06source"\xa9\x01\n\x17\x42\x61tchPredictInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x42\x08\n\x06source"Q\n\x13\x44ocumentInputConfig\x12:\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSource"\xb7\x01\n\x0cOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xc3\x01\n\x18\x42\x61tchPredictOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xcf\x02\n\x17ModelExportOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12\x46\n\x0fgcr_destination\x18\x03 
\x01(\x0b\x32+.google.cloud.automl.v1beta1.GcrDestinationH\x00\x12\x14\n\x0cmodel_format\x18\x04 \x01(\t\x12P\n\x06params\x18\x02 \x03(\x0b\x32@.google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stination"\x86\x01\n#ExportEvaluatedExamplesOutputConfig\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\x1f\n\tGcsSource\x12\x12\n\ninput_uris\x18\x01 \x03(\t"#\n\x0e\x42igQuerySource\x12\x11\n\tinput_uri\x18\x01 \x01(\t"+\n\x0eGcsDestination\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t")\n\x13\x42igQueryDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\t"$\n\x0eGcrDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - - -_INPUTCONFIG_PARAMSENTRY = _descriptor.Descriptor( - name="ParamsEntry", - full_name="google.cloud.automl.v1beta1.InputConfig.ParamsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.automl.v1beta1.InputConfig.ParamsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.InputConfig.ParamsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=325, - serialized_end=370, -) - -_INPUTCONFIG = _descriptor.Descriptor( - name="InputConfig", - full_name="google.cloud.automl.v1beta1.InputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_source", - full_name="google.cloud.automl.v1beta1.InputConfig.gcs_source", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bigquery_source", - full_name="google.cloud.automl.v1beta1.InputConfig.bigquery_source", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="params", - full_name="google.cloud.automl.v1beta1.InputConfig.params", - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_INPUTCONFIG_PARAMSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source", - full_name="google.cloud.automl.v1beta1.InputConfig.source", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=106, - serialized_end=380, -) - - -_BATCHPREDICTINPUTCONFIG = _descriptor.Descriptor( - name="BatchPredictInputConfig", - full_name="google.cloud.automl.v1beta1.BatchPredictInputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_source", - full_name="google.cloud.automl.v1beta1.BatchPredictInputConfig.gcs_source", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bigquery_source", - full_name="google.cloud.automl.v1beta1.BatchPredictInputConfig.bigquery_source", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source", - full_name="google.cloud.automl.v1beta1.BatchPredictInputConfig.source", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=383, - serialized_end=552, -) - - -_DOCUMENTINPUTCONFIG = _descriptor.Descriptor( - name="DocumentInputConfig", - full_name="google.cloud.automl.v1beta1.DocumentInputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_source", - full_name="google.cloud.automl.v1beta1.DocumentInputConfig.gcs_source", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=554, - serialized_end=635, -) - - -_OUTPUTCONFIG = _descriptor.Descriptor( - name="OutputConfig", - 
full_name="google.cloud.automl.v1beta1.OutputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_destination", - full_name="google.cloud.automl.v1beta1.OutputConfig.gcs_destination", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bigquery_destination", - full_name="google.cloud.automl.v1beta1.OutputConfig.bigquery_destination", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="destination", - full_name="google.cloud.automl.v1beta1.OutputConfig.destination", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=638, - serialized_end=821, -) - - -_BATCHPREDICTOUTPUTCONFIG = _descriptor.Descriptor( - name="BatchPredictOutputConfig", - full_name="google.cloud.automl.v1beta1.BatchPredictOutputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_destination", - full_name="google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bigquery_destination", - full_name="google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="destination", - full_name="google.cloud.automl.v1beta1.BatchPredictOutputConfig.destination", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=824, - serialized_end=1019, -) - - -_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY = _descriptor.Descriptor( - name="ParamsEntry", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - 
full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=325, - serialized_end=370, -) - -_MODELEXPORTOUTPUTCONFIG = _descriptor.Descriptor( - name="ModelExportOutputConfig", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_destination", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.gcs_destination", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="gcr_destination", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.gcr_destination", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="model_format", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.model_format", - index=2, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="params", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.params", - index=3, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="destination", - full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.destination", - index=0, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1022, - serialized_end=1357, -) - - -_EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG = _descriptor.Descriptor( - name="ExportEvaluatedExamplesOutputConfig", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="bigquery_destination", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig.bigquery_destination", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="destination", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig.destination", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1360, - serialized_end=1494, -) - - -_GCSSOURCE = _descriptor.Descriptor( - name="GcsSource", - full_name="google.cloud.automl.v1beta1.GcsSource", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="input_uris", - full_name="google.cloud.automl.v1beta1.GcsSource.input_uris", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1496, - serialized_end=1527, -) - - -_BIGQUERYSOURCE = _descriptor.Descriptor( - name="BigQuerySource", - full_name="google.cloud.automl.v1beta1.BigQuerySource", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="input_uri", - full_name="google.cloud.automl.v1beta1.BigQuerySource.input_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1529, - serialized_end=1564, -) - - -_GCSDESTINATION = _descriptor.Descriptor( - name="GcsDestination", - full_name="google.cloud.automl.v1beta1.GcsDestination", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_uri_prefix", - full_name="google.cloud.automl.v1beta1.GcsDestination.output_uri_prefix", - index=0, - 
number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1566, - serialized_end=1609, -) - - -_BIGQUERYDESTINATION = _descriptor.Descriptor( - name="BigQueryDestination", - full_name="google.cloud.automl.v1beta1.BigQueryDestination", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_uri", - full_name="google.cloud.automl.v1beta1.BigQueryDestination.output_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1611, - serialized_end=1652, -) - - -_GCRDESTINATION = _descriptor.Descriptor( - name="GcrDestination", - full_name="google.cloud.automl.v1beta1.GcrDestination", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_uri", - full_name="google.cloud.automl.v1beta1.GcrDestination.output_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1654, - serialized_end=1690, -) - -_INPUTCONFIG_PARAMSENTRY.containing_type = _INPUTCONFIG -_INPUTCONFIG.fields_by_name["gcs_source"].message_type = _GCSSOURCE -_INPUTCONFIG.fields_by_name["bigquery_source"].message_type = _BIGQUERYSOURCE -_INPUTCONFIG.fields_by_name["params"].message_type = _INPUTCONFIG_PARAMSENTRY -_INPUTCONFIG.oneofs_by_name["source"].fields.append( - _INPUTCONFIG.fields_by_name["gcs_source"] -) -_INPUTCONFIG.fields_by_name[ - "gcs_source" -].containing_oneof = _INPUTCONFIG.oneofs_by_name["source"] -_INPUTCONFIG.oneofs_by_name["source"].fields.append( - _INPUTCONFIG.fields_by_name["bigquery_source"] -) -_INPUTCONFIG.fields_by_name[ - "bigquery_source" -].containing_oneof = _INPUTCONFIG.oneofs_by_name["source"] -_BATCHPREDICTINPUTCONFIG.fields_by_name["gcs_source"].message_type = _GCSSOURCE -_BATCHPREDICTINPUTCONFIG.fields_by_name[ - "bigquery_source" -].message_type = _BIGQUERYSOURCE -_BATCHPREDICTINPUTCONFIG.oneofs_by_name["source"].fields.append( - _BATCHPREDICTINPUTCONFIG.fields_by_name["gcs_source"] -) -_BATCHPREDICTINPUTCONFIG.fields_by_name[ - "gcs_source" -].containing_oneof = _BATCHPREDICTINPUTCONFIG.oneofs_by_name["source"] 
-_BATCHPREDICTINPUTCONFIG.oneofs_by_name["source"].fields.append( - _BATCHPREDICTINPUTCONFIG.fields_by_name["bigquery_source"] -) -_BATCHPREDICTINPUTCONFIG.fields_by_name[ - "bigquery_source" -].containing_oneof = _BATCHPREDICTINPUTCONFIG.oneofs_by_name["source"] -_DOCUMENTINPUTCONFIG.fields_by_name["gcs_source"].message_type = _GCSSOURCE -_OUTPUTCONFIG.fields_by_name["gcs_destination"].message_type = _GCSDESTINATION -_OUTPUTCONFIG.fields_by_name["bigquery_destination"].message_type = _BIGQUERYDESTINATION -_OUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _OUTPUTCONFIG.fields_by_name["gcs_destination"] -) -_OUTPUTCONFIG.fields_by_name[ - "gcs_destination" -].containing_oneof = _OUTPUTCONFIG.oneofs_by_name["destination"] -_OUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _OUTPUTCONFIG.fields_by_name["bigquery_destination"] -) -_OUTPUTCONFIG.fields_by_name[ - "bigquery_destination" -].containing_oneof = _OUTPUTCONFIG.oneofs_by_name["destination"] -_BATCHPREDICTOUTPUTCONFIG.fields_by_name[ - "gcs_destination" -].message_type = _GCSDESTINATION -_BATCHPREDICTOUTPUTCONFIG.fields_by_name[ - "bigquery_destination" -].message_type = _BIGQUERYDESTINATION -_BATCHPREDICTOUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _BATCHPREDICTOUTPUTCONFIG.fields_by_name["gcs_destination"] -) -_BATCHPREDICTOUTPUTCONFIG.fields_by_name[ - "gcs_destination" -].containing_oneof = _BATCHPREDICTOUTPUTCONFIG.oneofs_by_name["destination"] -_BATCHPREDICTOUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _BATCHPREDICTOUTPUTCONFIG.fields_by_name["bigquery_destination"] -) -_BATCHPREDICTOUTPUTCONFIG.fields_by_name[ - "bigquery_destination" -].containing_oneof = _BATCHPREDICTOUTPUTCONFIG.oneofs_by_name["destination"] -_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY.containing_type = _MODELEXPORTOUTPUTCONFIG -_MODELEXPORTOUTPUTCONFIG.fields_by_name[ - "gcs_destination" -].message_type = _GCSDESTINATION -_MODELEXPORTOUTPUTCONFIG.fields_by_name[ - "gcr_destination" -].message_type = _GCRDESTINATION -_MODELEXPORTOUTPUTCONFIG.fields_by_name[ - "params" -].message_type = _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY -_MODELEXPORTOUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _MODELEXPORTOUTPUTCONFIG.fields_by_name["gcs_destination"] -) -_MODELEXPORTOUTPUTCONFIG.fields_by_name[ - "gcs_destination" -].containing_oneof = _MODELEXPORTOUTPUTCONFIG.oneofs_by_name["destination"] -_MODELEXPORTOUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _MODELEXPORTOUTPUTCONFIG.fields_by_name["gcr_destination"] -) -_MODELEXPORTOUTPUTCONFIG.fields_by_name[ - "gcr_destination" -].containing_oneof = _MODELEXPORTOUTPUTCONFIG.oneofs_by_name["destination"] -_EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG.fields_by_name[ - "bigquery_destination" -].message_type = _BIGQUERYDESTINATION -_EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG.oneofs_by_name["destination"].fields.append( - _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG.fields_by_name["bigquery_destination"] -) -_EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG.fields_by_name[ - "bigquery_destination" -].containing_oneof = _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG.oneofs_by_name["destination"] -DESCRIPTOR.message_types_by_name["InputConfig"] = _INPUTCONFIG -DESCRIPTOR.message_types_by_name["BatchPredictInputConfig"] = _BATCHPREDICTINPUTCONFIG -DESCRIPTOR.message_types_by_name["DocumentInputConfig"] = _DOCUMENTINPUTCONFIG -DESCRIPTOR.message_types_by_name["OutputConfig"] = _OUTPUTCONFIG -DESCRIPTOR.message_types_by_name["BatchPredictOutputConfig"] = _BATCHPREDICTOUTPUTCONFIG 
-DESCRIPTOR.message_types_by_name["ModelExportOutputConfig"] = _MODELEXPORTOUTPUTCONFIG -DESCRIPTOR.message_types_by_name[ - "ExportEvaluatedExamplesOutputConfig" -] = _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG -DESCRIPTOR.message_types_by_name["GcsSource"] = _GCSSOURCE -DESCRIPTOR.message_types_by_name["BigQuerySource"] = _BIGQUERYSOURCE -DESCRIPTOR.message_types_by_name["GcsDestination"] = _GCSDESTINATION -DESCRIPTOR.message_types_by_name["BigQueryDestination"] = _BIGQUERYDESTINATION -DESCRIPTOR.message_types_by_name["GcrDestination"] = _GCRDESTINATION -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -InputConfig = _reflection.GeneratedProtocolMessageType( - "InputConfig", - (_message.Message,), - { - "ParamsEntry": _reflection.GeneratedProtocolMessageType( - "ParamsEntry", - (_message.Message,), - { - "DESCRIPTOR": _INPUTCONFIG_PARAMSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig.ParamsEntry) - }, - ), - "DESCRIPTOR": _INPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Input configuration for ImportData Action. The format of input - depends on dataset_metadata the Dataset into which the import is - happening has. As input source the - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is - expected, unless specified otherwise. Additionally any input .CSV file - by itself must be 100MB or smaller, unless specified otherwise. If an - “example” file (that is, image, video etc.) with identical content - (even if it had different GCS_FILE_PATH) is mentioned multiple times, - then its label, bounding boxes etc. are appended. The same file should - be always provided with the same ML_USE and GCS_FILE_PATH, if it is - not, then these values are nondeterministically selected from the - given ones. The formats are represented in EBNF with commas being - literal and with non-terminal symbols defined near the end of this - comment. The formats are: - For Image Classification: CSV file(s) - with each line in format: ML_USE,GCS_FILE_PATH,LABEL,LABEL,… - GCS_FILE_PATH leads to image of up to 30MB in size. Supported - extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO For - MULTICLASS classification type, at most one LABEL is allowed per - image. If an image has not yet been labeled, then it should be - mentioned just once with no LABEL. Some sample rows: - TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - For Image Object Detection: CSV - file(s) with each line in format: - ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) GCS_FILE_PATH - leads to image of up to 30MB in size. Supported extensions: .JPEG, - .GIF, .PNG. Each image is assumed to be exhaustively labeled. The - minimum allowed BOUNDING_BOX edge length is 0.01, and no more than - 500 BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is - defined per line). If an image has not yet been labeled, then it - should be mentioned just once with no LABEL and the “,,,,,,,” in - place of the BOUNDING_BOX. For images which are known to not contain - any bounding boxes, they should be labelled explictly as - “NEGATIVE_IMAGE”, followed by “,,,,,,,” in place of the BOUNDING_BOX. 
- Sample rows: TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - For Video - Classification: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. - The GCS_FILE_PATH should lead to another .csv file which describes - examples that have given ML_USE, using the following row format: - GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) Here - GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length of - the video, and end has to be after the start. Any segment of a video - which has one or more labels on it, is considered a hard negative for - all other labels. Any segment with no labels on it is considered to - be unknown. If a whole video is unknown, then it shuold be mentioned - just once with “,,” in place of LABEL, - TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file: - TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for - a particular ML_USE: gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - For Video - Object Tracking: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. - The GCS_FILE_PATH should lead to another .csv file which describes - examples that have given ML_USE, using one of the following row - format: GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or - GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up to - 50GB in size and up to 3h duration. Supported extensions: .MOV, - .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to obtain a - better model. When a specific labeled entity leaves the video frame, - and shows up afterwards it is not required, albeit preferable, that - the same INSTANCE_ID is given to it. TIMESTAMP must be within the - length of the video, the BOUNDING_BOX is assumed to be drawn on the - closest video’s frame to the TIMESTAMP. Any mentioned by the - TIMESTAMP frame is expected to be exhaustively labeled and no more - than 500 BOUNDING_BOX-es per frame are allowed. If a whole video is - unknown, then it should be mentioned just once with “,,,,,,,,,,” in - place of LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. Sample top - level CSV file: TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV - file for a particular ML_USE: - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,, - For Text Extraction: CSV file(s) - with each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads - to a .JSONL (that is, JSON Lines) file which either imports text - in-line or as documents. Any given .JSONL file must be 100MB or - smaller. 
The in-line .JSONL file contains, per line, a proto that - wraps a TextSnippet proto (in json representation) followed by one - or more AnnotationPayload protos (called annotations), which have - display_name and text_extraction detail populated. The given text - is expected to be annotated exhaustively, for example, if you look - for animals and text contains “dolphin” that is not labeled, then - “dolphin” is assumed to not be an animal. Any given text snippet - content must be 10KB or smaller, and also be UTF-8 NFC encoded - (ASCII already is). The document .JSONL file contains, per line, a - proto that wraps a Document proto. The Document proto must have - either document_text or input_config set. In document_text case, - the Document proto may also contain the spatial information of the - document, including layout, document dimension and page number. In - input_config case, only PDF documents are supported now, and each - document may be up to 2MB large. Currently, annotations on - documents cannot be specified at import. Three sample CSV rows: - TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl - TEST,gs://folder/file3.jsonl Sample in-line JSON Lines file for - entity extraction (presented here with artificial line breaks, but - the only actual line break is denoted by :raw-latex:`\n`).: { - “document”: { “document_text”: {“content”: “dog cat”} “layout”: [ { - “text_segment”: { “start_offset”: 0, “end_offset”: 3, }, - “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: - 0.1, “y”: 0.1}, {“x”: 0.1, “y”: 0.3}, {“x”: 0.3, “y”: 0.3}, {“x”: - 0.3, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }, { - “text_segment”: { “start_offset”: 4, “end_offset”: 7, }, - “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: - 0.4, “y”: 0.1}, {“x”: 0.4, “y”: 0.3}, {“x”: 0.8, “y”: 0.3}, {“x”: - 0.8, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }], - “document_dimensions”: { “width”: 8.27, “height”: 11.69, “unit”: - INCH, } “page_count”: 1, }, “annotations”: [ { “display_name”: - “animal”, “text_extraction”: {“text_segment”: {“start_offset”: 0, - “end_offset”: 3}} }, { “display_name”: “animal”, “text_extraction”: - {“text_segment”: {“start_offset”: 4, “end_offset”: 7}} } ], }:raw- - latex:`\n { "text_snippet": { - "content": "This dog is good." }, - "annotations": [ { "display_name": - "animal", "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 8} } - } ] }` Sample document JSON Lines file (presented - here with artificial line breaks, but the only actual line break is - denoted by :raw-latex:`\n`).: { “document”: { “input_config”: { - “gcs_source”: { “input_uris”: [ “gs://folder/document1.pdf” ] } } } - }:raw-latex:`\n { "document": { - "input_config": { "gcs_source": { "input_uris": [ - "gs://folder/document2.pdf" ] } } - } }` - For Text Classification: CSV file(s) with each line - in format: ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,… - TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If - the column content is a valid gcs file path, i.e. prefixed by - “gs://”, it will be treated as a GCS_FILE_PATH, else if the content - is enclosed within double quotes ("“), it is treated as a - TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for - example,”gs://folder/content.txt“, and the content in it is - extracted as a text snippet. In TEXT_SNIPPET case, the column - content excluding quotes is treated as to be imported text snippet. 
- In both cases, the text snippet/file size must be within 128kB. - Maximum 100 unique labels are allowed per CSV row. Sample rows: - TRAIN,”They have bad food and very rude“,RudeService,BadFood - TRAIN,gs://folder/content.txt,SlowService TEST,”Typically always bad - service there.“,RudeService VALIDATE,”Stomach ache to go.",BadFood - - For Text Sentiment: CSV file(s) with each line in format: - ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and - GCS_FILE_PATH are distinguished by a pattern. If the column content - is a valid gcs file path, that is, prefixed by “gs://”, it is treated - as a GCS_FILE_PATH, otherwise it is treated as a TEXT_SNIPPET. In the - GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 - encoding, for example, “gs://folder/content.txt”, and the content in - it is extracted as a text snippet. In TEXT_SNIPPET case, the column - content itself is treated as to be imported text snippet. In both - cases, the text snippet must be up to 500 characters long. Sample - rows: TRAIN,“@freewrytin this is way too good for your product”,2 - TRAIN,“I need this product so bad”,3 TEST,“Thank you for this - product.”,4 VALIDATE,gs://folder/content.txt,2 - For Tables: Either - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or [ - bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour - ce] can be used. All inputs is concatenated into a single [primary_ta - ble][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_n - ame] For gcs_source: CSV file(s), where the first row of the first - file is the header, containing unique column names. If the first row - of a subsequent file is the same as the header, then it is also - treated as a header. All other rows contain values for the - corresponding columns. Each .CSV file by itself must be 10GB or - smaller, and their total size must be 100GB or smaller. First three - sample rows of a CSV file: “Id”,“First Name”,“Last - Name”,“Dob”,“Addresses” “1”,“John”,“Doe”,“1968-01-22”,“[{"status":"cu - rrent","address":"123_First_Avenue","city":"Seattle","state":"WA","zip - ":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Mai - n_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears" - :"5"}]” “2”,“Jane”,“Doe”,“1980-10-16”,“[{"status":"current","address" - :"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOf - Years":"2"},{"status":"previous","address":"321_Main_Street","city":"H - oboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} For - bigquery_source: An URI of a BigQuery table. The user data size of the - BigQuery table must be 100GB or smaller. An imported table must have - between 2 and 1,000 columns, inclusive, and between 1000 and - 100,000,000 rows, inclusive. There are at most 5 import data running - in parallel. Definitions: ML_USE =”TRAIN" \| “VALIDATE” \| “TEST” \| - “UNASSIGNED” Describes how the given example (file) should be used for - model training. “UNASSIGNED” can be used when user has no preference. - GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/image1.png”. - LABEL = A display name of an object on an image, video etc., e.g. - “dog”. Must be up to 32 characters long and can consist only of ASCII - Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For - each label an AnnotationSpec is created which display_name becomes the - label; AnnotationSpecs are given back in predictions. 
INSTANCE_ID = A - positive integer that identifies a specific instance of a labeled - entity on an example. Used e.g. to track two cars on a video while - being able to tell apart which one is which. BOUNDING_BOX = - VERTEX,VERTEX,VERTEX,VERTEX \| VERTEX,,,VERTEX,, A rectangle parallel - to the frame of the example (image, video). If 4 vertices are given - they are connected by edges in the order provided, if 2 are given they - are recognized as diagonally opposite vertices of the rectangle. - VERTEX = COORDINATE,COORDINATE First coordinate is horizontal (x), the - second is vertical (y). COORDINATE = A float in 0 to 1 range, relative - to total length of image or video in given dimension. For fractions - the leading non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is - in top left. TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning, - inclusive, of a time segment within an example that has a time - dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses an - end, exclusive, of a time segment within an example that has a time - dimension (e.g. video). TIME_OFFSET = A number of seconds as measured - from the start of an example (e.g. video). Fractions are allowed, up - to a microsecond precision. “inf” is allowed, and it means the end of - the example. TEXT_SNIPPET = A content of a text snippet, UTF-8 - encoded, enclosed within double quotes ("“). SENTIMENT = An integer - between 0 and Dataset.text_sentiment_dataset_metadata.sentiment_max - (inclusive). Describes the ordinal of the sentiment - higher value - means a more positive sentiment. All the values are completely - relative, i.e. neither 0 needs to mean a negative or neutral sentiment - nor sentiment_max needs to mean a positive one - it is just required - that 0 is the least positive sentiment in the data, and sentiment_max - is the most positive one. The SENTIMENT shouldn’t be confused - with”score" or “magnitude” from the previous Natural Language - Sentiment Analysis API. All SENTIMENT values between 0 and - sentiment_max must be represented in the imported data. On prediction - the same 0 to sentiment_max range will be used. The difference between - neighboring sentiment values needs not to be uniform, e.g. 1 and 2 may - be similar whereas the difference between 2 and 3 may be huge. - Errors: If any of the provided CSV files can’t be parsed or if more - than certain percent of CSV rows cannot be processed then the - operation fails and nothing is imported. Regardless of overall success - or failure the per-row failures, up to a certain count cap, is listed - in Operation.metadata.partial_failures. - - Attributes: - source: - The source of the input. - gcs_source: - The Google Cloud Storage location for the input content. In - ImportData, the gcs_source points to a csv with structure - described in the comment. - bigquery_source: - The BigQuery location for the input content. - params: - Additional domain-specific parameters describing the semantic - of the imported data, any string must be up to 25000 - characters long. - For Tables: ``schema_inference_version`` - - (integer) Required. The version of the algorithm that - should be used for the initial inference of the schema - (columns’ DataTypes) of the table the data is being - imported into. Allowed values: “1”. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig) - }, -) -_sym_db.RegisterMessage(InputConfig) -_sym_db.RegisterMessage(InputConfig.ParamsEntry) - -BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( - "BatchPredictInputConfig", - (_message.Message,), - { - "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Input configuration for BatchPredict Action. The format of input - depends on the ML problem of the model used for prediction. As input - source the - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is - expected, unless specified otherwise. The formats are represented in - EBNF with commas being literal and with non-terminal symbols defined - near the end of this comment. The formats are: - For Image - Classification: CSV file(s) with each line having just a single - column: GCS_FILE_PATH which leads to image of up to 30MB in size. - Supported extensions: .JPEG, .GIF, .PNG. This path is treated as - the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - - For Image Object Detection: CSV file(s) with each line having just - a single column: GCS_FILE_PATH which leads to image of up to 30MB - in size. Supported extensions: .JPEG, .GIF, .PNG. This path is - treated as the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - - For Video Classification: CSV file(s) with each line in format: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads - to video of up to 50GB in size and up to 3h duration. Supported - extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and - TIME_SEGMENT_END must be within the length of the video, and end has - to be after the start. Three sample rows: - gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - For Video Object Tracking: CSV file(s) - with each line in format: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads - to video of up to 50GB in size and up to 3h duration. Supported - extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and - TIME_SEGMENT_END must be within the length of the video, and end has - to be after the start. Three sample rows: - gs://folder/video1.mp4,10,240 gs://folder/video1.mp4,300,360 - gs://folder/vid2.mov,0,inf - For Text Classification: CSV file(s) - with each line having just a single column: GCS_FILE_PATH \| - TEXT_SNIPPET Any given text file can have size upto 128kB. Any - given text snippet content must have 60,000 characters or less. - Three sample rows: gs://folder/text1.txt “Some text content to - predict” gs://folder/text3.pdf Supported file extensions: .txt, - .pdf - For Text Sentiment: CSV file(s) with each line having just a - single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file - can have size upto 128kB. Any given text snippet content must have - 500 characters or less. Three sample rows: gs://folder/text1.txt - “Some text content to predict” gs://folder/text3.pdf Supported file - extensions: .txt, .pdf - For Text Extraction .JSONL (i.e. JSON - Lines) file(s) which either provide text in-line or as documents - (for a single BatchPredict call only one of the these formats may - be used). 
The in-line .JSONL file(s) contain per line a proto that - wraps a temporary user-assigned TextSnippet ID (string up to 2000 - characters long) called “id”, a TextSnippet proto (in json - representation) and zero or more TextFeature protos. Any given text - snippet content must have 30,000 characters or less, and also be - UTF-8 NFC encoded (ASCII already is). The IDs provided should be - unique. The document .JSONL file(s) contain, per line, a proto that - wraps a Document proto with input_config set. Only PDF documents - are supported now, and each document must be up to 2MB large. Any - given .JSONL file must be 100MB or smaller, and no more than 20 - files may be given. - For Tables: Either - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or [ - bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour - ce]. GCS case: CSV file(s), each by itself 10GB or smaller and total - size must be 100GB or smaller, where first file must have a header - containing column names. If the first row of a subsequent file is the - same as the header, then it is also treated as a header. All other - rows contain values for the corresponding columns. The column names - must contain the model’s [input_feature_column_specs’][google.cloud.a - utoml.v1beta1.TablesModelMetadata.input_feature_column_specs] - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - (order doesn’t matter). The columns corresponding to the model’s input - feature column specs must contain values compatible with the column - spec’s data types. Prediction on all the rows, i.e. the CSV lines, - will be attempted. For FORECASTING [prediction_type][google.cloud.aut - oml.v1beta1.TablesModelMetadata.prediction_type]: all columns having - [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSp - ec.ForecastingMetadata.ColumnType] type will be ignored. First three - sample rows of a CSV file: “First Name”,“Last Name”,“Dob”,“Addresses” - “John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_A - venue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1" - },{"status":"previous","address":"456_Main_Street","city":"Portland"," - state":"OR","zip":"22222","numberOfYears":"5"}]” “Jane”,“Doe”,“1980-1 - 0-16”,"[{“status”:“current”,“address”:“789_Any_Avenue”,“city”:“Albany” - ,“state”:“NY”,“zip”:“33333”,“numberOfYears”:“2”},{“status”:“previous”, - “address”:“321_Main_Street”,“city”:“Hoboken”,“state”:“NJ”,“zip”:“44444 - ”,“numberOfYears”:“3”}]} BigQuery case: An URI of a BigQuery table. - The user data size of the BigQuery table must be 100GB or smaller. The - column names must contain the model’s [input_feature_column_specs’][g - oogle.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_sp - ecs] - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - (order doesn’t matter). The columns corresponding to the model’s input - feature column specs must contain values compatible with the column - spec’s data types. Prediction on all the rows of the table will be - attempted. For FORECASTING [prediction_type][google.cloud.automl.v1be - ta1.TablesModelMetadata.prediction_type]: all columns having [TIME_SE - RIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.Forec - astingMetadata.ColumnType] type will be ignored. Definitions: - GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/video.avi”. 
- TEXT_SNIPPET = The content of a text snippet, UTF-8 encoded, enclosed - within double quotes ("") TIME_SEGMENT_START = TIME_OFFSET Expresses a - beginning, inclusive, of a time segment within an example that has a - time dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses - an end, exclusive, of a time segment within an example that has a time - dimension (e.g. video). TIME_OFFSET = A number of seconds as measured - from the start of an example (e.g. video). Fractions are allowed, up - to a microsecond precision. "inf" is allowed and it means the end of - the example. Errors: If any of the provided CSV files can’t be parsed - or if more than a certain percent of CSV rows cannot be processed then - the operation fails and prediction does not happen. Regardless of - overall success or failure, the per-row failures, up to a certain count - cap, will be listed in Operation.metadata.partial_failures. - - Attributes: - source: - Required. The source of the input. - gcs_source: - The Google Cloud Storage location for the input content. - bigquery_source: - The BigQuery location for the input content. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictInputConfig) - }, -) -_sym_db.RegisterMessage(BatchPredictInputConfig) - -DocumentInputConfig = _reflection.GeneratedProtocolMessageType( - "DocumentInputConfig", - (_message.Message,), - { - "DESCRIPTOR": _DOCUMENTINPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Input configuration of a - [Document][google.cloud.automl.v1beta1.Document]. - - Attributes: - gcs_source: - The Google Cloud Storage location of the document file. Only a - single path should be given. Max supported size: 512MB. - Supported extensions: .PDF. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DocumentInputConfig) - }, -) -_sym_db.RegisterMessage(DocumentInputConfig) - -OutputConfig = _reflection.GeneratedProtocolMessageType( - "OutputConfig", - (_message.Message,), - { - "DESCRIPTOR": _OUTPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """\* For Translation: CSV file ``translation.csv``, with each line in - format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which - describes examples that have given ML_USE, using the following row - format per line: TEXT_SNIPPET (in source language) :raw-latex:`\t - `TEXT_SNIPPET (in target language) - For Tables: Output depends on - whether the dataset was imported from GCS or BigQuery. GCS case: [ - gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destinat - ion] must be set. Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,…,\ ``tables_N.csv`` with each having as header line - the table’s column names, and all other lines contain values for the - header columns. BigQuery case: [bigquery_destination][google.cloud.au - toml.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery - project must be set. In the given project a new dataset will be - created with name ``export_data__`` where will be made BigQuery- - dataset-name compatible (e.g. most special characters will become - underscores), and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ “based - on ISO-8601” format. In that dataset a new table called - ``primary_table`` will be created, and filled with precisely the same - data as that obtained on import. - - Attributes: - destination: - Required. The destination of the output.
- gcs_destination: - The Google Cloud Storage location where the output is to be - written to. For Image Object Detection, Text Extraction, Video - Classification and Tables, in the given directory a new - directory will be created with name: export_data-- where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All - export output will be written into that directory. - bigquery_destination: - The BigQuery location where the output is to be written to. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.OutputConfig) - }, -) -_sym_db.RegisterMessage(OutputConfig) - -BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType( - "BatchPredictOutputConfig", - (_message.Message,), - { - "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Output configuration for BatchPredict Action. As destination the [gc - s_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gc - s_destination] must be set unless specified otherwise for a domain. If - gcs_destination is set then in the given directory a new directory is - created. Its name will be “prediction--”, where timestamp is in YYYY- - MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the - ML problem the predictions are made for. - For Image Classification: - In the created directory files ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. A single image - will be listed only once with all its annotations, and its - annotations will never be split across files. Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - image’s “ID” : “” followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image failed - (partially or completely), then an additional ``errors_1.jsonl``, - ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same “ID” : “” but - here followed by exactly one ```google.rpc.Status`` `_\_ - containing only ``code`` and ``message``\ fields. - For Image Object - Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,…,\ - ``image_object_detection_N.jsonl`` will be created, where N may be - 1, and depends on the total number of the successfully predicted - images and annotations. Each .JSONL file will contain, per line, a - JSON representation of a proto that wraps image’s “ID” : “” - followed by a list of zero or more AnnotationPayload protos (called - annotations), which have image_object_detection detail populated. A - single image will be listed only once with all its annotations, and - its annotations will never be split across files. If prediction for - any image failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - the same “ID” : “” but here followed by exactly one - ```google.rpc.Status`` `__ containing only ``code`` and - ``message``\ fields. 
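To make the shard layout just described concrete, the sketch below reads one results file. The `"ID"` and `"annotations"` key names follow the wrapper proto described above but should be treated as assumptions, and the local file name is a placeholder:

```py
import json

# Scan one BatchPredict results shard for Image Classification.
# Each line wraps the source image's "ID" plus its annotations.
with open("image_classification_1.jsonl") as shard:
    for line in shard:
        prediction = json.loads(line)
        for annotation in prediction.get("annotations", []):
            score = annotation["classification"]["score"]
            print(prediction["ID"], annotation["display_name"], score)
```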
\* For Video Classification: In the created - directory a video_classification.csv file, and a .JSON file per each - video classification requested in the input (i.e. each line in given - CSV(s)), will be created. :: The format of - video_classification.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SE - GMENT_END,JSON_FILE_NAME,STATUS where: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the - prediction input lines (i.e. video_classification.csv has precisely - the same number of lines as the prediction input had.) JSON_FILE_NAME - = Name of .JSON file in the output directory, which contains - prediction responses for the video time segment. STATUS = “OK” if - prediction completed successfully, or an error code with message - otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. :: Each .JSON file, assuming STATUS is - "OK", will contain a list of AnnotationPayload protos in JSON - format, which are the predictions for the video time segment - the file is assigned to in the video_classification.csv. All - AnnotationPayload protos will have video_classification field - set, and will be sorted by video_classification.type field - (note that the returned types are governed by - `classification_types` parameter in - [PredictService.BatchPredictRequest.params][]). - For Video Object - Tracking: In the created directory a video_object_tracking.csv file - will be created, and multiple files video_object_tracking_1.json, - video_object_tracking_2.json,…, video_object_tracking_N.json, - where N is the number of requests in the input (i.e. the number of - lines in given CSV(s)). :: The format of - video_object_tracking.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_S - EGMENT_END,JSON_FILE_NAME,STATUS where: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the - prediction input lines (i.e. video_object_tracking.csv has precisely - the same number of lines as the prediction input had.) JSON_FILE_NAME - = Name of .JSON file in the output directory, which contains - prediction responses for the video time segment. STATUS = “OK” if - prediction completed successfully, or an error code with message - otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. :: Each .JSON file, assuming STATUS is - "OK", will contain a list of AnnotationPayload protos in JSON - format, which are the predictions for each frame of the video - time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - For Text Classification: In the - created directory files ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - inputs and annotations found. :: Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - input text snippet or input text file and a list of zero or more - AnnotationPayload protos (called annotations), which have - classification detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for - any text snippet or file failed (partially or completely), then - additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions).
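Returning to the video outputs described above: because video_classification.csv (and video_object_tracking.csv) mirror the input rows one-to-one, per-segment status checks are straightforward. A short sketch, with the local file path a placeholder:

```py
import csv

# Row format: GCS_FILE_PATH, TIME_SEGMENT_START, TIME_SEGMENT_END,
# JSON_FILE_NAME, STATUS -- one row per prediction input line.
with open("video_classification.csv") as results:
    for path, start, end, json_name, status in csv.reader(results):
        if status == "OK":
            print(f"{path} [{start}s, {end}s): responses in {json_name}")
        else:
            print(f"{path} [{start}s, {end}s) failed: {status}")
```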
These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one ```google.rpc.Status`` `__ containing only - ``code`` and ``message``. - For Text Sentiment: In the created - directory files ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. :: Each .JSONL file will contain, - per line, a JSON representation of a proto that wraps input text - snippet or input text file and a list of zero or more - AnnotationPayload protos (called annotations), which have - text_sentiment detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for - any text snippet or file failed (partially or completely), then - additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one ```google.rpc.Status`` `__ containing only - ``code`` and ``message``. - For Text Extraction: In the created - directory files ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. The contents of these .JSONL file(s) depend on - whether the input used inline text, or documents. If input was - inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet’s - “id” (if specified), followed by input text snippet, and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_extraction detail populated. A single text snippet will be - listed only once with all its annotations, and its annotations will - never be split across files. If input used documents, then each - .JSONL file will contain, per line, a JSON representation of a proto - that wraps given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by a - list of zero or more AnnotationPayload protos (called annotations), - which have text_extraction detail populated and refer, via their - indices, to the OCR-ed text snippet. A single document (and its text - snippet) will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for any - text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - either the “id” : “” (in case of inline) or the document proto (in - case of document) but here followed by exactly one - ```google.rpc.Status`` `__ containing only ``code`` and - ``message``. - For Tables: Output depends on whether [gcs_destinati - on][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destinati - on] or [bigquery_destination][google.cloud.automl.v1beta1.BatchPredic - tOutputConfig.bigquery_destination] is set (either is allowed). 
GCS - case: In the created directory files ``tables_1.csv``, - ``tables_2.csv``,…, ``tables_N.csv`` will be created, where N may be - 1, and depends on the total number of the successfully predicted rows. - For all CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta - 1.TablesModelMetadata.prediction_type]: Each .csv file will contain a - header, listing all columns’ - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - given on input followed by M target column names in the format of "<[ - target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.t - arget_column_spec] [display_name][google.cloud.automl.v1beta1.ColumnS - pec.display_name]>\_\_score" where M is the number of distinct target - values, i.e. number of distinct values in the target column of the - table used to train the model. Subsequent lines will contain the - respective values of successfully predicted rows, with the last, - i.e. the target, columns having the corresponding prediction - [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. For - REGRESSION and FORECASTING [prediction_type-s][google.cloud.automl.v1 - beta1.TablesModelMetadata.prediction_type]: Each .csv file will - contain a header, listing all columns’ - [display_name-s][google.cloud.automl.v1beta1.display_name] given on - input followed by the predicted target column with name in the format - of "predicted_<[target_column_specs][google.cloud.automl.v1beta1.Tabl - esModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" - Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, column having the - predicted target value. If prediction for any rows failed, then an - additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will - be created (N depends on total number of failed rows). These files - will have analogous format as ``tables_*.csv``, but always with a - single target column having ```google.rpc.Status`` `_\_ - represented as a JSON string, and containing only ``code`` and - ``message``. BigQuery case: [bigquery_destination][google.cloud.autom - l.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery - project must be set. In the given project a new dataset will be - created with name ``prediction__`` where will be made BigQuery-dataset-name compatible - (e.g. most special characters will become underscores), and timestamp - will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the - dataset two tables will be created, ``predictions``, and ``errors``. - The ``predictions`` table’s column names will be the input columns’ - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - followed by the target column with name in the format of "predicted_< - [target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata. - target_column_spec] - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" - The input feature columns will contain the respective values of - successfully predicted rows, with the target column having an ARRAY of - [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], - represented as STRUCT-s, containing - [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. The - ``errors`` table contains rows for which the prediction has failed, it - has analogous input columns while the target column name is in the - format of "errors_<[target_column_specs][google.cloud.automl.v1beta1. 
- TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", - and as a value has ```google.rpc.Status`` `__ represented - as a STRUCT, and containing only ``code`` and ``message``. - - Attributes: - destination: - Required. The destination of the output. - gcs_destination: - The Google Cloud Storage location of the directory where the - output is to be written to. - bigquery_destination: - The BigQuery location where the output is to be written to. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOutputConfig) - }, -) -_sym_db.RegisterMessage(BatchPredictOutputConfig) - -ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType( - "ModelExportOutputConfig", - (_message.Message,), - { - "ParamsEntry": _reflection.GeneratedProtocolMessageType( - "ParamsEntry", - (_message.Message,), - { - "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry) - }, - ), - "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Output configuration for ModelExport Action. - - Attributes: - destination: - Required. The destination of the output. - gcs_destination: - The Google Cloud Storage location where the model is to be - written to. This location may only be set for the following - model formats: “tflite”, “edgetpu_tflite”, “tf_saved_model”, - “tf_js”, “core_ml”. Under the directory given as the - destination a new one with name “model-export--”, where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will - be created. Inside the model and any of its supporting files - will be written. - gcr_destination: - The GCR location where model image is to be pushed to. This - location may only be set for the following model formats: - “docker”. The model image will be created under the given - URI. - model_format: - The format in which the model must be exported. The available, - and default, formats depend on the problem and model type (if - given problem and type combination doesn’t have a format - listed, it means its models are not exportable): - For Image - Classification mobile-low-latency-1, mobile-versatile-1, - mobile-high-accuracy-1: “tflite” (default), “edgetpu_tflite”, - “tf_saved_model”, “tf_js”, “docker”. - For Image - Classification mobile-core-ml-low-latency-1, mobile-core- - ml-versatile-1, mobile-core-ml-high-accuracy-1: “core_ml” - (default). Formats description: - tflite - Used for Android - mobile devices. - edgetpu_tflite - Used for `Edge TPU - `__ devices. - - tf_saved_model - A tensorflow model in SavedModel format. - - tf_js - A `TensorFlow.js `_\_ - model that can be used in the browser and in Node.js using - JavaScript. - docker - Used for Docker containers. Use the - params field to customize the container. The container is - verified to work correctly on ubuntu 16.04 operating - system. See more at [containers quickstart](https: - //cloud.google.com/vision/automl/docs/containers-gcs- - quickstart) \* core_ml - Used for iOS mobile devices. - params: - Additional model-type and format specific parameters - describing the requirements for the to be exported model - files, any string must be up to 25000 characters long. - For - ``docker`` format: ``cpu_architecture`` - (string) “x86_64” - (default). ``gpu_architecture`` - (string) “none” (default), - “nvidia”. 
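The format/destination pairing described above maps to a small export request. Below is a sketch against the current client surface; the model resource name and bucket path are placeholders:

```py
from google.cloud import automl_v1beta1 as automl

# Export a TF Lite package to GCS. For the "docker" format a
# gcr_destination would be supplied instead of gcs_destination.
output_config = automl.ModelExportOutputConfig(
    model_format="tflite",
    gcs_destination=automl.GcsDestination(
        output_uri_prefix="gs://my-bucket/export/"
    ),
)
operation = automl.AutoMlClient().export_model(
    name="projects/my-project/locations/us-central1/models/MODEL_ID",
    output_config=output_config,
)
operation.result()  # blocks until the export finishes
```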
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelExportOutputConfig) - }, -) -_sym_db.RegisterMessage(ModelExportOutputConfig) -_sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry) - -ExportEvaluatedExamplesOutputConfig = _reflection.GeneratedProtocolMessageType( - "ExportEvaluatedExamplesOutputConfig", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Output configuration for ExportEvaluatedExamples Action. Note that - this call is available only for 30 days since the moment the model was - evaluated. The output depends on the domain, as follows (note that - only examples from the TEST set are exported): - For Tables: [bigqu - ery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_des - tination] pointing to a BigQuery project must be set. In the given - project a new dataset will be created with name - ``export_evaluated_examples__`` where will be made BigQuery-dataset-name compatible (e.g. most - special characters will become underscores), and timestamp will be in - YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset an - ``evaluated_examples`` table will be created. It will have all the - same columns as the [primary_table][google.cloud.automl.v1beta1.Table - sDatasetMetadata.primary_table_spec_id] of the - [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which the - model was created, as they were at the moment of model’s evaluation - (this includes the target column with its ground truth), followed by a - column called “predicted\_”. That last column will contain the model’s - prediction result for each respective row, given as ARRAY of - [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], - represented as STRUCT-s, containing - [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. - - Attributes: - destination: - Required. The destination of the output. - bigquery_destination: - The BigQuery location where the output is to be written to. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig) - }, -) -_sym_db.RegisterMessage(ExportEvaluatedExamplesOutputConfig) - -GcsSource = _reflection.GeneratedProtocolMessageType( - "GcsSource", - (_message.Message,), - { - "DESCRIPTOR": _GCSSOURCE, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """The Google Cloud Storage location for the input content. - - Attributes: - input_uris: - Required. Google Cloud Storage URIs to input files, up to 2000 - characters long. Accepted forms: \* Full object path, - e.g. gs://bucket/directory/object.csv - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcsSource) - }, -) -_sym_db.RegisterMessage(GcsSource) - -BigQuerySource = _reflection.GeneratedProtocolMessageType( - "BigQuerySource", - (_message.Message,), - { - "DESCRIPTOR": _BIGQUERYSOURCE, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """The BigQuery location for the input content. - - Attributes: - input_uri: - Required. BigQuery URI to a table, up to 2000 characters long. - Accepted forms: \* BigQuery path - e.g. 
bq://projectId.bqDatasetId.bqTableId - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BigQuerySource) - }, -) -_sym_db.RegisterMessage(BigQuerySource) - -GcsDestination = _reflection.GeneratedProtocolMessageType( - "GcsDestination", - (_message.Message,), - { - "DESCRIPTOR": _GCSDESTINATION, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """The Google Cloud Storage location where the output is to be written - to. - - Attributes: - output_uri_prefix: - Required. Google Cloud Storage URI to output directory, up to - 2000 characters long. Accepted forms: \* Prefix path: - gs://bucket/directory The requesting user must have write - permission to the bucket. The directory is created if it - doesn’t exist. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcsDestination) - }, -) -_sym_db.RegisterMessage(GcsDestination) - -BigQueryDestination = _reflection.GeneratedProtocolMessageType( - "BigQueryDestination", - (_message.Message,), - { - "DESCRIPTOR": _BIGQUERYDESTINATION, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """The BigQuery location for the output content. - - Attributes: - output_uri: - Required. BigQuery URI to a project, up to 2000 characters - long. Accepted forms: \* BigQuery path e.g. bq://projectId - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BigQueryDestination) - }, -) -_sym_db.RegisterMessage(BigQueryDestination) - -GcrDestination = _reflection.GeneratedProtocolMessageType( - "GcrDestination", - (_message.Message,), - { - "DESCRIPTOR": _GCRDESTINATION, - "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """The GCR location where the image must be pushed to. - - Attributes: - output_uri: - Required. Google Container Registry URI of the new image, up - to 2000 characters long. See https://cloud.google.com/container-registry/docs/pushing-and-pulling#pushing_an_image_to_a_registry Accepted forms: \* - [HOSTNAME]/[PROJECT-ID]/[IMAGE] \* [HOSTNAME]/[PROJECT- - ID]/[IMAGE]:[TAG] The requesting user must have permission to - push images to the project. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcrDestination) - }, -) -_sym_db.RegisterMessage(GcrDestination) - - -DESCRIPTOR._options = None -_INPUTCONFIG_PARAMSENTRY._options = None -_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/io_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/io_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/io_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/model.proto b/google/cloud/automl_v1beta1/proto/model.proto deleted file mode 100644 index 2b2e8d73..00000000 --- a/google/cloud/automl_v1beta1/proto/model.proto +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/resource.proto"; -import "google/cloud/automl/v1beta1/image.proto"; -import "google/cloud/automl/v1beta1/tables.proto"; -import "google/cloud/automl/v1beta1/text.proto"; -import "google/cloud/automl/v1beta1/translation.proto"; -import "google/cloud/automl/v1beta1/video.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// API proto representing a trained machine learning model. -message Model { - option (google.api.resource) = { - type: "automl.googleapis.com/Model" - pattern: "projects/{project}/locations/{location}/models/{model}" - }; - - // Deployment state of the model. - enum DeploymentState { - // Should not be used, an un-set enum has this value by default. - DEPLOYMENT_STATE_UNSPECIFIED = 0; - - // Model is deployed. - DEPLOYED = 1; - - // Model is not deployed. - UNDEPLOYED = 2; - } - - // Required. - // The model metadata that is specific to the problem type. - // Must match the metadata type of the dataset used to train the model. - oneof model_metadata { - // Metadata for translation models. - TranslationModelMetadata translation_model_metadata = 15; - - // Metadata for image classification models. - ImageClassificationModelMetadata image_classification_model_metadata = 13; - - // Metadata for text classification models. - TextClassificationModelMetadata text_classification_model_metadata = 14; - - // Metadata for image object detection models. - ImageObjectDetectionModelMetadata image_object_detection_model_metadata = 20; - - // Metadata for video classification models. - VideoClassificationModelMetadata video_classification_model_metadata = 23; - - // Metadata for video object tracking models. - VideoObjectTrackingModelMetadata video_object_tracking_model_metadata = 21; - - // Metadata for text extraction models. - TextExtractionModelMetadata text_extraction_model_metadata = 19; - - // Metadata for Tables models. - TablesModelMetadata tables_model_metadata = 24; - - // Metadata for text sentiment models. - TextSentimentModelMetadata text_sentiment_model_metadata = 22; - } - - // Output only. Resource name of the model. - // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` - string name = 1; - - // Required. The name of the model to show in the interface. The name can be - // up to 32 characters long and can consist only of ASCII Latin letters A-Z - // and a-z, underscores - // (_), and ASCII digits 0-9. It must start with a letter. - string display_name = 2; - - // Required. The resource ID of the dataset used to create the model. The dataset must - // come from the same ancestor project and location. - string dataset_id = 3; - - // Output only. Timestamp when the model training finished and can be used for prediction. - google.protobuf.Timestamp create_time = 7; - - // Output only. Timestamp when this model was last updated. - google.protobuf.Timestamp update_time = 11; - - // Output only. Deployment state of the model. A model can only serve - // prediction requests after it gets deployed. 
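Since deployment_state gates serving, callers typically check it before predicting. A minimal sketch with a placeholder resource name:

```py
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()
model = client.get_model(
    name="projects/my-project/locations/us-central1/models/MODEL_ID"
)
# A model only serves prediction requests once deployed.
if model.deployment_state != automl.Model.DeploymentState.DEPLOYED:
    client.deploy_model(name=model.name).result()
```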
- DeploymentState deployment_state = 8; -} diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation.proto b/google/cloud/automl_v1beta1/proto/model_evaluation.proto deleted file mode 100644 index d5633fcd..00000000 --- a/google/cloud/automl_v1beta1/proto/model_evaluation.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/resource.proto"; -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/cloud/automl/v1beta1/detection.proto"; -import "google/cloud/automl/v1beta1/regression.proto"; -import "google/cloud/automl/v1beta1/tables.proto"; -import "google/cloud/automl/v1beta1/text_extraction.proto"; -import "google/cloud/automl/v1beta1/text_sentiment.proto"; -import "google/cloud/automl/v1beta1/translation.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Evaluation results of a model. -message ModelEvaluation { - option (google.api.resource) = { - type: "automl.googleapis.com/ModelEvaluation" - pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}" - }; - - // Output only. Problem type specific evaluation metrics. - oneof metrics { - // Model evaluation metrics for image, text, video and tables - // classification. - // Tables problem is considered a classification when the target column - // is CATEGORY DataType. - ClassificationEvaluationMetrics classification_evaluation_metrics = 8; - - // Model evaluation metrics for Tables regression. - // Tables problem is considered a regression when the target column - // has FLOAT64 DataType. - RegressionEvaluationMetrics regression_evaluation_metrics = 24; - - // Model evaluation metrics for translation. - TranslationEvaluationMetrics translation_evaluation_metrics = 9; - - // Model evaluation metrics for image object detection. - ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12; - - // Model evaluation metrics for video object tracking. - VideoObjectTrackingEvaluationMetrics video_object_tracking_evaluation_metrics = 14; - - // Evaluation metrics for text sentiment models. - TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11; - - // Evaluation metrics for text extraction models. - TextExtractionEvaluationMetrics text_extraction_evaluation_metrics = 13; - } - - // Output only. Resource name of the model evaluation. - // Format: - // - // `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}` - string name = 1; - - // Output only. 
The ID of the annotation spec that the model evaluation applies to. - // The ID is empty for the overall model evaluation. - // For Tables, annotation specs in the dataset do not exist and this ID is - // always not set, but for CLASSIFICATION - // - // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] - // the - // [display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name] - // field is used. - string annotation_spec_id = 2; - - // Output only. The value of - // [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] at - // the moment when the model was trained. Because this field returns a value - // at model training time, for different models trained from the same dataset, - // the values may differ, since display names could have been changed between - // the two models' trainings. - // For Tables CLASSIFICATION - // - // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] - // distinct values of the target column at the moment of the model evaluation - // are populated here. - // The display_name is empty for the overall model evaluation. - string display_name = 15; - - // Output only. Timestamp when this model evaluation was created. - google.protobuf.Timestamp create_time = 5; - - // Output only. The number of examples used for model evaluation, i.e. for - // which ground truth from time of model creation is compared against the - // predicted annotations created by the model. - // For overall ModelEvaluation (i.e. with annotation_spec_id not set) this is - // the total number of all examples used for evaluation. - // Otherwise, this is the count of examples that according to the ground - // truth were annotated by the - // - // [annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id]. - int32 evaluated_example_count = 6; -} diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py deleted file mode 100644 index 4dee2ad7..00000000 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py +++ /dev/null @@ -1,479 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: google/cloud/automl_v1beta1/proto/model_evaluation.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - detection_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - regression_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - tables_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_extraction_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_sentiment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - translation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/model_evaluation.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n8google/cloud/automl_v1beta1/proto/model_evaluation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb1\x08\n\x0fModelEvaluation\x12i\n!classification_evaluation_metrics\x18\x08 \x01(\x0b\x32<.google.cloud.automl.v1beta1.ClassificationEvaluationMetricsH\x00\x12\x61\n\x1dregression_evaluation_metrics\x18\x18 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.RegressionEvaluationMetricsH\x00\x12\x63\n\x1etranslation_evaluation_metrics\x18\t \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TranslationEvaluationMetricsH\x00\x12w\n)image_object_detection_evaluation_metrics\x18\x0c \x01(\x0b\x32\x42.google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetricsH\x00\x12u\n(video_object_tracking_evaluation_metrics\x18\x0e \x01(\x0b\x32\x41.google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetricsH\x00\x12h\n!text_sentiment_evaluation_metrics\x18\x0b 
\x01(\x0b\x32;.google.cloud.automl.v1beta1.TextSentimentEvaluationMetricsH\x00\x12j\n"text_extraction_evaluation_metrics\x18\r \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextExtractionEvaluationMetricsH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\x12\x61nnotation_spec_id\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0f \x01(\t\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x17\x65valuated_example_count\x18\x06 \x01(\x05:\x87\x01\xea\x41\x83\x01\n%automl.googleapis.com/ModelEvaluation\x12Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}B\t\n\x07metricsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_MODELEVALUATION = _descriptor.Descriptor( - name="ModelEvaluation", - full_name="google.cloud.automl.v1beta1.ModelEvaluation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="classification_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.classification_evaluation_metrics", - index=0, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="regression_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.regression_evaluation_metrics", - index=1, - number=24, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="translation_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.translation_evaluation_metrics", - index=2, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_object_detection_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.image_object_detection_evaluation_metrics", - index=3, - number=12, - type=11, - cpp_type=10, - label=1, 
- has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_object_tracking_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.video_object_tracking_evaluation_metrics", - index=4, - number=14, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_sentiment_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.text_sentiment_evaluation_metrics", - index=5, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_extraction_evaluation_metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.text_extraction_evaluation_metrics", - index=6, - number=13, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.name", - index=7, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="annotation_spec_id", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id", - index=8, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.display_name", - index=9, - number=15, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.create_time", - index=10, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="evaluated_example_count", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.evaluated_example_count", - index=11, - number=6, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\352A\203\001\n%automl.googleapis.com/ModelEvaluation\022Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="metrics", - full_name="google.cloud.automl.v1beta1.ModelEvaluation.metrics", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=553, - serialized_end=1626, -) - -_MODELEVALUATION.fields_by_name[ - "classification_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "regression_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2._REGRESSIONEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "translation_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2._TRANSLATIONEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "image_object_detection_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2._IMAGEOBJECTDETECTIONEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "video_object_tracking_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2._VIDEOOBJECTTRACKINGEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "text_sentiment_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2._TEXTSENTIMENTEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "text_extraction_evaluation_metrics" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2._TEXTEXTRACTIONEVALUATIONMETRICS -) -_MODELEVALUATION.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["classification_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - "classification_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["regression_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - "regression_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["translation_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - "translation_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["image_object_detection_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - 
"image_object_detection_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["video_object_tracking_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - "video_object_tracking_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["text_sentiment_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - "text_sentiment_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -_MODELEVALUATION.oneofs_by_name["metrics"].fields.append( - _MODELEVALUATION.fields_by_name["text_extraction_evaluation_metrics"] -) -_MODELEVALUATION.fields_by_name[ - "text_extraction_evaluation_metrics" -].containing_oneof = _MODELEVALUATION.oneofs_by_name["metrics"] -DESCRIPTOR.message_types_by_name["ModelEvaluation"] = _MODELEVALUATION -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ModelEvaluation = _reflection.GeneratedProtocolMessageType( - "ModelEvaluation", - (_message.Message,), - { - "DESCRIPTOR": _MODELEVALUATION, - "__module__": "google.cloud.automl_v1beta1.proto.model_evaluation_pb2", - "__doc__": """Evaluation results of a model. - - Attributes: - metrics: - Output only. Problem type specific evaluation metrics. - classification_evaluation_metrics: - Model evaluation metrics for image, text, video and tables - classification. Tables problem is considered a classification - when the target column is CATEGORY DataType. - regression_evaluation_metrics: - Model evaluation metrics for Tables regression. Tables problem - is considered a regression when the target column has FLOAT64 - DataType. - translation_evaluation_metrics: - Model evaluation metrics for translation. - image_object_detection_evaluation_metrics: - Model evaluation metrics for image object detection. - video_object_tracking_evaluation_metrics: - Model evaluation metrics for video object tracking. - text_sentiment_evaluation_metrics: - Evaluation metrics for text sentiment models. - text_extraction_evaluation_metrics: - Evaluation metrics for text extraction models. - name: - Output only. Resource name of the model evaluation. Format: ` - `projects/{project_id}/locations/{location_id}/models/{model_i - d}/modelEvaluations/{model_evaluation_id}`` - annotation_spec_id: - Output only. The ID of the annotation spec that the model - evaluation applies to. The The ID is empty for the overall - model evaluation. For Tables annotation specs in the dataset - do not exist and this ID is always not set, but for - CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta - 1.TablesModelMetadata.prediction_type] the [display_name][goog - le.cloud.automl.v1beta1.ModelEvaluation.display_name] field is - used. - display_name: - Output only. The value of [display_name][google.cloud.automl.v - 1beta1.AnnotationSpec.display_name] at the moment when the - model was trained. Because this field returns a value at model - training time, for different models trained from the same - dataset, the values may differ, since display names could had - been changed between the two model’s trainings. For Tables - CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta - 1.TablesModelMetadata.prediction_type] distinct values of the - target column at the moment of the model evaluation are - populated here. The display_name is empty for the overall - model evaluation. 
- create_time: - Output only. Timestamp when this model evaluation was created. - evaluated_example_count: - Output only. The number of examples used for model evaluation, - i.e. for which ground truth from time of model creation is - compared against the predicted annotations created by the - model. For overall ModelEvaluation (i.e. with - annotation_spec_id not set) this is the total number of all - examples used for evaluation. Otherwise, this is the count of - examples that according to the ground truth were annotated by - the [annotation_spec_id][google.cloud.automl.v1beta1.ModelEva - luation.annotation_spec_id]. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelEvaluation) - }, -) -_sym_db.RegisterMessage(ModelEvaluation) - - -DESCRIPTOR._options = None -_MODELEVALUATION._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/model_pb2.py b/google/cloud/automl_v1beta1/proto/model_pb2.py deleted file mode 100644 index cf935cca..00000000 --- a/google/cloud/automl_v1beta1/proto/model_pb2.py +++ /dev/null @@ -1,580 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/model.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - tables_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - text_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - translation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - video_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/model.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - 
serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/model.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xcc\n\n\x05Model\x12[\n\x1atranslation_model_metadata\x18\x0f \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TranslationModelMetadataH\x00\x12l\n#image_classification_model_metadata\x18\r \x01(\x0b\x32=.google.cloud.automl.v1beta1.ImageClassificationModelMetadataH\x00\x12j\n"text_classification_model_metadata\x18\x0e \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextClassificationModelMetadataH\x00\x12o\n%image_object_detection_model_metadata\x18\x14 \x01(\x0b\x32>.google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadataH\x00\x12l\n#video_classification_model_metadata\x18\x17 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoClassificationModelMetadataH\x00\x12m\n$video_object_tracking_model_metadata\x18\x15 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadataH\x00\x12\x62\n\x1etext_extraction_model_metadata\x18\x13 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.TextExtractionModelMetadataH\x00\x12Q\n\x15tables_model_metadata\x18\x18 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.TablesModelMetadataH\x00\x12`\n\x1dtext_sentiment_model_metadata\x18\x16 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TextSentimentModelMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x12\n\ndataset_id\x18\x03 \x01(\t\x12/\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x10\x64\x65ployment_state\x18\x08 \x01(\x0e\x32\x32.google.cloud.automl.v1beta1.Model.DeploymentState"Q\n\x0f\x44\x65ploymentState\x12 \n\x1c\x44\x45PLOYMENT_STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x44\x45PLOYED\x10\x01\x12\x0e\n\nUNDEPLOYED\x10\x02:X\xea\x41U\n\x1b\x61utoml.googleapis.com/Model\x12\x36projects/{project}/locations/{location}/models/{model}B\x10\n\x0emodel_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_MODEL_DEPLOYMENTSTATE = _descriptor.EnumDescriptor( - name="DeploymentState", - full_name="google.cloud.automl.v1beta1.Model.DeploymentState", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="DEPLOYMENT_STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - 
name="DEPLOYED", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UNDEPLOYED", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1577, - serialized_end=1658, -) -_sym_db.RegisterEnumDescriptor(_MODEL_DEPLOYMENTSTATE) - - -_MODEL = _descriptor.Descriptor( - name="Model", - full_name="google.cloud.automl.v1beta1.Model", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="translation_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.translation_model_metadata", - index=0, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_classification_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.image_classification_model_metadata", - index=1, - number=13, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_classification_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.text_classification_model_metadata", - index=2, - number=14, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_object_detection_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.image_object_detection_model_metadata", - index=3, - number=20, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_classification_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.video_classification_model_metadata", - index=4, - number=23, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="video_object_tracking_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.video_object_tracking_model_metadata", - index=5, - number=21, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="text_extraction_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.text_extraction_model_metadata", - index=6, - number=19, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tables_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.tables_model_metadata", - index=7, - number=24, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="text_sentiment_model_metadata", - full_name="google.cloud.automl.v1beta1.Model.text_sentiment_model_metadata", - index=8, - number=22, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.Model.name", - index=9, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.cloud.automl.v1beta1.Model.display_name", - index=10, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="dataset_id", - full_name="google.cloud.automl.v1beta1.Model.dataset_id", - index=11, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.cloud.automl.v1beta1.Model.create_time", - index=12, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_time", - full_name="google.cloud.automl.v1beta1.Model.update_time", - index=13, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="deployment_state", - full_name="google.cloud.automl.v1beta1.Model.deployment_state", - index=14, - number=8, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_MODEL_DEPLOYMENTSTATE,], - serialized_options=b"\352AU\n\033automl.googleapis.com/Model\0226projects/{project}/locations/{location}/models/{model}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="model_metadata", - full_name="google.cloud.automl.v1beta1.Model.model_metadata", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=410, - serialized_end=1766, -) - -_MODEL.fields_by_name[ - "translation_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2._TRANSLATIONMODELMETADATA -) -_MODEL.fields_by_name[ - "image_classification_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGECLASSIFICATIONMODELMETADATA -) -_MODEL.fields_by_name[ - "text_classification_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2._TEXTCLASSIFICATIONMODELMETADATA -) -_MODEL.fields_by_name[ - "image_object_detection_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGEOBJECTDETECTIONMODELMETADATA -) -_MODEL.fields_by_name[ - "video_classification_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2._VIDEOCLASSIFICATIONMODELMETADATA -) -_MODEL.fields_by_name[ - "video_object_tracking_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_video__pb2._VIDEOOBJECTTRACKINGMODELMETADATA -) -_MODEL.fields_by_name[ - "text_extraction_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2._TEXTEXTRACTIONMODELMETADATA -) -_MODEL.fields_by_name[ - "tables_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2._TABLESMODELMETADATA -) -_MODEL.fields_by_name[ - "text_sentiment_model_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2._TEXTSENTIMENTMODELMETADATA -) -_MODEL.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_MODEL.fields_by_name[ - "update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_MODEL.fields_by_name["deployment_state"].enum_type = _MODEL_DEPLOYMENTSTATE -_MODEL_DEPLOYMENTSTATE.containing_type = _MODEL -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["translation_model_metadata"] -) -_MODEL.fields_by_name[ - "translation_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["image_classification_model_metadata"] -) -_MODEL.fields_by_name[ - "image_classification_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - 
_MODEL.fields_by_name["text_classification_model_metadata"] -) -_MODEL.fields_by_name[ - "text_classification_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["image_object_detection_model_metadata"] -) -_MODEL.fields_by_name[ - "image_object_detection_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["video_classification_model_metadata"] -) -_MODEL.fields_by_name[ - "video_classification_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["video_object_tracking_model_metadata"] -) -_MODEL.fields_by_name[ - "video_object_tracking_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["text_extraction_model_metadata"] -) -_MODEL.fields_by_name[ - "text_extraction_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["tables_model_metadata"] -) -_MODEL.fields_by_name["tables_model_metadata"].containing_oneof = _MODEL.oneofs_by_name[ - "model_metadata" -] -_MODEL.oneofs_by_name["model_metadata"].fields.append( - _MODEL.fields_by_name["text_sentiment_model_metadata"] -) -_MODEL.fields_by_name[ - "text_sentiment_model_metadata" -].containing_oneof = _MODEL.oneofs_by_name["model_metadata"] -DESCRIPTOR.message_types_by_name["Model"] = _MODEL -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Model = _reflection.GeneratedProtocolMessageType( - "Model", - (_message.Message,), - { - "DESCRIPTOR": _MODEL, - "__module__": "google.cloud.automl_v1beta1.proto.model_pb2", - "__doc__": """API proto representing a trained machine learning model. - - Attributes: - model_metadata: - Required. The model metadata that is specific to the problem - type. Must match the metadata type of the dataset used to - train the model. - translation_model_metadata: - Metadata for translation models. - image_classification_model_metadata: - Metadata for image classification models. - text_classification_model_metadata: - Metadata for text classification models. - image_object_detection_model_metadata: - Metadata for image object detection models. - video_classification_model_metadata: - Metadata for video classification models. - video_object_tracking_model_metadata: - Metadata for video object tracking models. - text_extraction_model_metadata: - Metadata for text extraction models. - tables_model_metadata: - Metadata for Tables models. - text_sentiment_model_metadata: - Metadata for text sentiment models. - name: - Output only. Resource name of the model. Format: ``projects/{p - roject_id}/locations/{location_id}/models/{model_id}`` - display_name: - Required. The name of the model to show in the interface. The - name can be up to 32 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscores (_), and ASCII - digits 0-9. It must start with a letter. - dataset_id: - Required. The resource ID of the dataset used to create the - model. The dataset must come from the same ancestor project - and location. - create_time: - Output only. Timestamp when the model training finished and - can be used for prediction. - update_time: - Output only. Timestamp when this model was last updated. 
- deployment_state: - Output only. Deployment state of the model. A model can only - serve prediction requests after it gets deployed. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Model) - }, -) -_sym_db.RegisterMessage(Model) - - -DESCRIPTOR._options = None -_MODEL._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/model_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/model_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/model_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/operations.proto b/google/cloud/automl_v1beta1/proto/operations.proto deleted file mode 100644 index cce3fedc..00000000 --- a/google/cloud/automl_v1beta1/proto/operations.proto +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/io.proto"; -import "google/cloud/automl/v1beta1/model.proto"; -import "google/cloud/automl/v1beta1/model_evaluation.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Metadata used across all long running operations returned by AutoML API. -message OperationMetadata { - // Ouptut only. Details of specific operation. Even if this field is empty, - // the presence allows to distinguish different types of operations. - oneof details { - // Details of a Delete operation. - DeleteOperationMetadata delete_details = 8; - - // Details of a DeployModel operation. - DeployModelOperationMetadata deploy_model_details = 24; - - // Details of an UndeployModel operation. - UndeployModelOperationMetadata undeploy_model_details = 25; - - // Details of CreateModel operation. - CreateModelOperationMetadata create_model_details = 10; - - // Details of ImportData operation. - ImportDataOperationMetadata import_data_details = 15; - - // Details of BatchPredict operation. - BatchPredictOperationMetadata batch_predict_details = 16; - - // Details of ExportData operation. - ExportDataOperationMetadata export_data_details = 21; - - // Details of ExportModel operation. - ExportModelOperationMetadata export_model_details = 22; - - // Details of ExportEvaluatedExamples operation. - ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = 26; - } - - // Output only. Progress of operation. Range: [0, 100]. - // Not used currently. 
- int32 progress_percent = 13; - - // Output only. Partial failures encountered. - // E.g. single files that couldn't be read. - // This field should never exceed 20 entries. - // Status details field will contain standard GCP error details. - repeated google.rpc.Status partial_failures = 2; - - // Output only. Time when the operation was created. - google.protobuf.Timestamp create_time = 3; - - // Output only. Time when the operation was updated for the last time. - google.protobuf.Timestamp update_time = 4; -} - -// Details of operations that perform deletes of any entities. -message DeleteOperationMetadata { - -} - -// Details of DeployModel operation. -message DeployModelOperationMetadata { - -} - -// Details of UndeployModel operation. -message UndeployModelOperationMetadata { - -} - -// Details of CreateModel operation. -message CreateModelOperationMetadata { - -} - -// Details of ImportData operation. -message ImportDataOperationMetadata { - -} - -// Details of ExportData operation. -message ExportDataOperationMetadata { - // Further describes this export data's output. - // Supplements - // [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. - message ExportDataOutputInfo { - // The output location to which the exported data is written. - oneof output_location { - // The full path of the Google Cloud Storage directory created, into which - // the exported data is written. - string gcs_output_directory = 1; - - // The path of the BigQuery dataset created, in bq://projectId.bqDatasetId - // format, into which the exported data is written. - string bigquery_output_dataset = 2; - } - } - - // Output only. Information further describing this export data's output. - ExportDataOutputInfo output_info = 1; -} - -// Details of BatchPredict operation. -message BatchPredictOperationMetadata { - // Further describes this batch predict's output. - // Supplements - // - // [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. - message BatchPredictOutputInfo { - // The output location into which prediction output is written. - oneof output_location { - // The full path of the Google Cloud Storage directory created, into which - // the prediction output is written. - string gcs_output_directory = 1; - - // The path of the BigQuery dataset created, in bq://projectId.bqDatasetId - // format, into which the prediction output is written. - string bigquery_output_dataset = 2; - } - } - - // Output only. The input config that was given upon starting this - // batch predict operation. - BatchPredictInputConfig input_config = 1; - - // Output only. Information further describing this batch predict's output. - BatchPredictOutputInfo output_info = 2; -} - -// Details of ExportModel operation. -message ExportModelOperationMetadata { - // Further describes the output of model export. - // Supplements - // - // [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - message ExportModelOutputInfo { - // The full path of the Google Cloud Storage directory created, into which - // the model will be exported. - string gcs_output_directory = 1; - } - - // Output only. Information further describing the output of this model - // export. - ExportModelOutputInfo output_info = 2; -} - -// Details of EvaluatedExamples operation. -message ExportEvaluatedExamplesOperationMetadata { - // Further describes the output of the evaluated examples export. 
- // Supplements - // - // [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. - message ExportEvaluatedExamplesOutputInfo { - // The path of the BigQuery dataset created, in bq://projectId.bqDatasetId - // format, into which the output of export evaluated examples is written. - string bigquery_output_dataset = 2; - } - - // Output only. Information further describing the output of this evaluated - // examples export. - ExportEvaluatedExamplesOutputInfo output_info = 2; -} diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py deleted file mode 100644 index a6811b08..00000000 --- a/google/cloud/automl_v1beta1/proto/operations_pb2.py +++ /dev/null @@ -1,1306 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/operations.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - model_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - model_evaluation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/operations.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 
\x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 \x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_OPERATIONMETADATA = _descriptor.Descriptor( - name="OperationMetadata", - full_name="google.cloud.automl.v1beta1.OperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="delete_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.delete_details", - index=0, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="deploy_model_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.deploy_model_details", - index=1, - number=24, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="undeploy_model_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.undeploy_model_details", - index=2, - number=25, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_model_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.create_model_details", - index=3, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="import_data_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.import_data_details", - index=4, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="batch_predict_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.batch_predict_details", - index=5, - number=16, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="export_data_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.export_data_details", - index=6, - number=21, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="export_model_details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.export_model_details", - index=7, - number=22, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="export_evaluated_examples_details", - 
full_name="google.cloud.automl.v1beta1.OperationMetadata.export_evaluated_examples_details", - index=8, - number=26, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress_percent", - full_name="google.cloud.automl.v1beta1.OperationMetadata.progress_percent", - index=9, - number=13, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="partial_failures", - full_name="google.cloud.automl.v1beta1.OperationMetadata.partial_failures", - index=10, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.cloud.automl.v1beta1.OperationMetadata.create_time", - index=11, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_time", - full_name="google.cloud.automl.v1beta1.OperationMetadata.update_time", - index=12, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="details", - full_name="google.cloud.automl.v1beta1.OperationMetadata.details", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=350, - serialized_end=1385, -) - - -_DELETEOPERATIONMETADATA = _descriptor.Descriptor( - name="DeleteOperationMetadata", - full_name="google.cloud.automl.v1beta1.DeleteOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1387, - serialized_end=1412, -) - - -_DEPLOYMODELOPERATIONMETADATA = _descriptor.Descriptor( - name="DeployModelOperationMetadata", - full_name="google.cloud.automl.v1beta1.DeployModelOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=1414, - serialized_end=1444, -) - - -_UNDEPLOYMODELOPERATIONMETADATA = _descriptor.Descriptor( - name="UndeployModelOperationMetadata", - full_name="google.cloud.automl.v1beta1.UndeployModelOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1446, - serialized_end=1478, -) - - -_CREATEMODELOPERATIONMETADATA = _descriptor.Descriptor( - name="CreateModelOperationMetadata", - full_name="google.cloud.automl.v1beta1.CreateModelOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1480, - serialized_end=1510, -) - - -_IMPORTDATAOPERATIONMETADATA = _descriptor.Descriptor( - name="ImportDataOperationMetadata", - full_name="google.cloud.automl.v1beta1.ImportDataOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1512, - serialized_end=1541, -) - - -_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO = _descriptor.Descriptor( - name="ExportDataOutputInfo", - full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_output_directory", - full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo.gcs_output_directory", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bigquery_output_dataset", - full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo.bigquery_output_dataset", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="output_location", - full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo.output_location", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1675, - serialized_end=1783, -) - -_EXPORTDATAOPERATIONMETADATA = _descriptor.Descriptor( - name="ExportDataOperationMetadata", - full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata", - 
filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_info", - full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.output_info", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1544, - serialized_end=1783, -) - - -_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO = _descriptor.Descriptor( - name="BatchPredictOutputInfo", - full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_output_directory", - full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo.gcs_output_directory", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="bigquery_output_dataset", - full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo.bigquery_output_dataset", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="output_location", - full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo.output_location", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1999, - serialized_end=2109, -) - -_BATCHPREDICTOPERATIONMETADATA = _descriptor.Descriptor( - name="BatchPredictOperationMetadata", - full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="input_config", - full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.input_config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="output_info", - 
full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.output_info", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1786, - serialized_end=2109, -) - - -_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO = _descriptor.Descriptor( - name="ExportModelOutputInfo", - full_name="google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gcs_output_directory", - full_name="google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo.gcs_output_directory", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2246, - serialized_end=2299, -) - -_EXPORTMODELOPERATIONMETADATA = _descriptor.Descriptor( - name="ExportModelOperationMetadata", - full_name="google.cloud.automl.v1beta1.ExportModelOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_info", - full_name="google.cloud.automl.v1beta1.ExportModelOperationMetadata.output_info", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2112, - serialized_end=2299, -) - - -_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO = _descriptor.Descriptor( - name="ExportEvaluatedExamplesOutputInfo", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="bigquery_output_dataset", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo.bigquery_output_dataset", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2472, - serialized_end=2540, -) - -_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA = _descriptor.Descriptor( - name="ExportEvaluatedExamplesOperationMetadata", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_info", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.output_info", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2302, - serialized_end=2540, -) - -_OPERATIONMETADATA.fields_by_name[ - "delete_details" -].message_type = _DELETEOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "deploy_model_details" -].message_type = _DEPLOYMODELOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "undeploy_model_details" -].message_type = _UNDEPLOYMODELOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "create_model_details" -].message_type = _CREATEMODELOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "import_data_details" -].message_type = _IMPORTDATAOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "batch_predict_details" -].message_type = _BATCHPREDICTOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "export_data_details" -].message_type = _EXPORTDATAOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "export_model_details" -].message_type = _EXPORTMODELOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "export_evaluated_examples_details" -].message_type = _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA -_OPERATIONMETADATA.fields_by_name[ - "partial_failures" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_OPERATIONMETADATA.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_OPERATIONMETADATA.fields_by_name[ - "update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["delete_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "delete_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["deploy_model_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "deploy_model_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["undeploy_model_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "undeploy_model_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] 
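(The assignments above and below hand-wire each `*_details` field into the `details` oneof of `OperationMetadata`, so at most one detail message is ever populated on a given operation. As a hedged aside for readers following the migration, here is a minimal sketch of inspecting that oneof with the 2.x proto-plus types; the helper name `which_detail` and the in-flight `operation` object are hypothetical, and it assumes `OperationMetadata` is re-exported at the `google.cloud.automl_v1beta1` package root.)

```py
# A minimal sketch, not part of the generated module being deleted here.
# Assumptions: google-cloud-automl >= 2.0.0 (proto-plus types), and a
# hypothetical `operation` (google.api_core.operation.Operation) returned
# by a long-running call such as client.batch_predict(...).
from google.cloud.automl_v1beta1 import OperationMetadata


def which_detail(operation) -> str:
    """Report which `details` oneof member is set on the LRO metadata."""
    metadata = operation.metadata  # deserialized OperationMetadata
    # WhichOneof lives on the raw protobuf message that proto-plus wraps,
    # reachable through the generated type's pb() helper.
    detail = OperationMetadata.pb(metadata).WhichOneof("details")
    return f"{detail or '<unset>'} ({metadata.progress_percent}% complete)"
```

Proto-plus messages do not expose `WhichOneof` directly, which is why the sketch goes through `OperationMetadata.pb(...)` rather than calling it on the metadata object itself.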
-_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["create_model_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "create_model_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["import_data_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "import_data_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["batch_predict_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "batch_predict_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["export_data_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "export_data_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["export_model_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "export_model_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_OPERATIONMETADATA.oneofs_by_name["details"].fields.append( - _OPERATIONMETADATA.fields_by_name["export_evaluated_examples_details"] -) -_OPERATIONMETADATA.fields_by_name[ - "export_evaluated_examples_details" -].containing_oneof = _OPERATIONMETADATA.oneofs_by_name["details"] -_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.containing_type = ( - _EXPORTDATAOPERATIONMETADATA -) -_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.oneofs_by_name[ - "output_location" -].fields.append( - _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.fields_by_name[ - "gcs_output_directory" - ] -) -_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.fields_by_name[ - "gcs_output_directory" -].containing_oneof = _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.oneofs_by_name[ - "output_location" -] -_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.oneofs_by_name[ - "output_location" -].fields.append( - _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.fields_by_name[ - "bigquery_output_dataset" - ] -) -_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.fields_by_name[ - "bigquery_output_dataset" -].containing_oneof = _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO.oneofs_by_name[ - "output_location" -] -_EXPORTDATAOPERATIONMETADATA.fields_by_name[ - "output_info" -].message_type = _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO -_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.containing_type = ( - _BATCHPREDICTOPERATIONMETADATA -) -_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.oneofs_by_name[ - "output_location" -].fields.append( - _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.fields_by_name[ - "gcs_output_directory" - ] -) -_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.fields_by_name[ - "gcs_output_directory" -].containing_oneof = _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.oneofs_by_name[ - "output_location" -] -_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.oneofs_by_name[ - "output_location" -].fields.append( - _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.fields_by_name[ - "bigquery_output_dataset" - ] -) -_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.fields_by_name[ - "bigquery_output_dataset" -].containing_oneof = _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO.oneofs_by_name[ - "output_location" 
-] -_BATCHPREDICTOPERATIONMETADATA.fields_by_name[ - "input_config" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._BATCHPREDICTINPUTCONFIG -) -_BATCHPREDICTOPERATIONMETADATA.fields_by_name[ - "output_info" -].message_type = _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO -_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO.containing_type = ( - _EXPORTMODELOPERATIONMETADATA -) -_EXPORTMODELOPERATIONMETADATA.fields_by_name[ - "output_info" -].message_type = _EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO -_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO.containing_type = ( - _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA -) -_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA.fields_by_name[ - "output_info" -].message_type = ( - _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO -) -DESCRIPTOR.message_types_by_name["OperationMetadata"] = _OPERATIONMETADATA -DESCRIPTOR.message_types_by_name["DeleteOperationMetadata"] = _DELETEOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "DeployModelOperationMetadata" -] = _DEPLOYMODELOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "UndeployModelOperationMetadata" -] = _UNDEPLOYMODELOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "CreateModelOperationMetadata" -] = _CREATEMODELOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "ImportDataOperationMetadata" -] = _IMPORTDATAOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "ExportDataOperationMetadata" -] = _EXPORTDATAOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "BatchPredictOperationMetadata" -] = _BATCHPREDICTOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "ExportModelOperationMetadata" -] = _EXPORTMODELOPERATIONMETADATA -DESCRIPTOR.message_types_by_name[ - "ExportEvaluatedExamplesOperationMetadata" -] = _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -OperationMetadata = _reflection.GeneratedProtocolMessageType( - "OperationMetadata", - (_message.Message,), - { - "DESCRIPTOR": _OPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Metadata used across all long running operations returned by AutoML - API. - - Attributes: - details: - Output only. Details of the specific operation. Even if this - field is empty, its presence allows different types of - operations to be distinguished. - delete_details: - Details of a Delete operation. - deploy_model_details: - Details of a DeployModel operation. - undeploy_model_details: - Details of an UndeployModel operation. - create_model_details: - Details of CreateModel operation. - import_data_details: - Details of ImportData operation. - batch_predict_details: - Details of BatchPredict operation. - export_data_details: - Details of ExportData operation. - export_model_details: - Details of ExportModel operation. - export_evaluated_examples_details: - Details of ExportEvaluatedExamples operation. - progress_percent: - Output only. Progress of operation. Range: [0, 100]. Not used - currently. - partial_failures: - Output only. Partial failures encountered. E.g. single files - that couldn’t be read. This field should never exceed 20 - entries. Status details field will contain standard GCP error - details. - create_time: - Output only. Time when the operation was created. - update_time: - Output only. Time when the operation was updated for the last - time.
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.OperationMetadata) - }, -) -_sym_db.RegisterMessage(OperationMetadata) - -DeleteOperationMetadata = _reflection.GeneratedProtocolMessageType( - "DeleteOperationMetadata", - (_message.Message,), - { - "DESCRIPTOR": _DELETEOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of operations that perform deletes of any entities.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteOperationMetadata) - }, -) -_sym_db.RegisterMessage(DeleteOperationMetadata) - -DeployModelOperationMetadata = _reflection.GeneratedProtocolMessageType( - "DeployModelOperationMetadata", - (_message.Message,), - { - "DESCRIPTOR": _DEPLOYMODELOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of DeployModel operation.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelOperationMetadata) - }, -) -_sym_db.RegisterMessage(DeployModelOperationMetadata) - -UndeployModelOperationMetadata = _reflection.GeneratedProtocolMessageType( - "UndeployModelOperationMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UNDEPLOYMODELOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of UndeployModel operation.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelOperationMetadata) - }, -) -_sym_db.RegisterMessage(UndeployModelOperationMetadata) - -CreateModelOperationMetadata = _reflection.GeneratedProtocolMessageType( - "CreateModelOperationMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEMODELOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of CreateModel operation.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelOperationMetadata) - }, -) -_sym_db.RegisterMessage(CreateModelOperationMetadata) - -ImportDataOperationMetadata = _reflection.GeneratedProtocolMessageType( - "ImportDataOperationMetadata", - (_message.Message,), - { - "DESCRIPTOR": _IMPORTDATAOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of ImportData operation.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataOperationMetadata) - }, -) -_sym_db.RegisterMessage(ImportDataOperationMetadata) - -ExportDataOperationMetadata = _reflection.GeneratedProtocolMessageType( - "ExportDataOperationMetadata", - (_message.Message,), - { - "ExportDataOutputInfo": _reflection.GeneratedProtocolMessageType( - "ExportDataOutputInfo", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes this export data’s output. Supplements - [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. - - Attributes: - output_location: - The output location to which the exported data is written. - gcs_output_directory: - The full path of the Google Cloud Storage directory created, - into which the exported data is written. - bigquery_output_dataset: - The path of the BigQuery dataset created, in - bq://projectId.bqDatasetId format, into which the exported - data is written. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo) - }, - ), - "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of ExportData operation. - - Attributes: - output_info: - Output only. Information further describing this export data’s - output. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataOperationMetadata) - }, -) -_sym_db.RegisterMessage(ExportDataOperationMetadata) -_sym_db.RegisterMessage(ExportDataOperationMetadata.ExportDataOutputInfo) - -BatchPredictOperationMetadata = _reflection.GeneratedProtocolMessageType( - "BatchPredictOperationMetadata", - (_message.Message,), - { - "BatchPredictOutputInfo": _reflection.GeneratedProtocolMessageType( - "BatchPredictOutputInfo", - (_message.Message,), - { - "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes this batch predict’s output. Supplements [BatchPred - ictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig] - . - - Attributes: - output_location: - The output location into which prediction output is written. - gcs_output_directory: - The full path of the Google Cloud Storage directory created, - into which the prediction output is written. - bigquery_output_dataset: - The path of the BigQuery dataset created, in - bq://projectId.bqDatasetId format, into which the prediction - output is written. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo) - }, - ), - "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of BatchPredict operation. - - Attributes: - input_config: - Output only. The input config that was given upon starting - this batch predict operation. - output_info: - Output only. Information further describing this batch - predict’s output. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOperationMetadata) - }, -) -_sym_db.RegisterMessage(BatchPredictOperationMetadata) -_sym_db.RegisterMessage(BatchPredictOperationMetadata.BatchPredictOutputInfo) - -ExportModelOperationMetadata = _reflection.GeneratedProtocolMessageType( - "ExportModelOperationMetadata", - (_message.Message,), - { - "ExportModelOutputInfo": _reflection.GeneratedProtocolMessageType( - "ExportModelOutputInfo", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes the output of model export. Supplements [ModelExpor - tOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - - Attributes: - gcs_output_directory: - The full path of the Google Cloud Storage directory created, - into which the model will be exported. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo) - }, - ), - "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of ExportModel operation. - - Attributes: - output_info: - Output only. Information further describing the output of this - model export. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelOperationMetadata) - }, -) -_sym_db.RegisterMessage(ExportModelOperationMetadata) -_sym_db.RegisterMessage(ExportModelOperationMetadata.ExportModelOutputInfo) - -ExportEvaluatedExamplesOperationMetadata = _reflection.GeneratedProtocolMessageType( - "ExportEvaluatedExamplesOperationMetadata", - (_message.Message,), - { - "ExportEvaluatedExamplesOutputInfo": _reflection.GeneratedProtocolMessageType( - "ExportEvaluatedExamplesOutputInfo", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes the output of the evaluated examples export. - Supplements [ExportEvaluatedExamplesOutputConfig][google.cloud.automl - .v1beta1.ExportEvaluatedExamplesOutputConfig]. - - Attributes: - bigquery_output_dataset: - The path of the BigQuery dataset created, in - bq://projectId.bqDatasetId format, into which the output of - export evaluated examples is written. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo) - }, - ), - "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of EvaluatedExamples operation. - - Attributes: - output_info: - Output only. Information further describing the output of this - evaluated examples export. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata) - }, -) -_sym_db.RegisterMessage(ExportEvaluatedExamplesOperationMetadata) -_sym_db.RegisterMessage( - ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo -) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/operations_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/operations_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/prediction_service.proto b/google/cloud/automl_v1beta1/proto/prediction_service.proto deleted file mode 100644 index 0bcf685e..00000000 --- a/google/cloud/automl_v1beta1/proto/prediction_service.proto +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/automl/v1beta1/annotation_payload.proto"; -import "google/cloud/automl/v1beta1/data_items.proto"; -import "google/cloud/automl/v1beta1/io.proto"; -import "google/cloud/automl/v1beta1/operations.proto"; -import "google/longrunning/operations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_outer_classname = "PredictionServiceProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// AutoML Prediction API. -// -// On any input that is documented to expect a string parameter in -// snake_case or kebab-case, either of those cases is accepted. -service PredictionService { - option (google.api.default_host) = "automl.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; - - // Perform an online prediction. The prediction result will be directly - // returned in the response. - // Available for following ML problems, and their expected request payloads: - // * Image Classification - Image in .JPEG, .GIF or .PNG format, image_bytes - // up to 30MB. - // * Image Object Detection - Image in .JPEG, .GIF or .PNG format, image_bytes - // up to 30MB. - // * Text Classification - TextSnippet, content up to 60,000 characters, - // UTF-8 encoded. - // * Text Extraction - TextSnippet, content up to 30,000 characters, - // UTF-8 NFC encoded. - // * Translation - TextSnippet, content up to 25,000 characters, UTF-8 - // encoded. - // * Tables - Row, with column values matching the columns of the model, - // up to 5MB. Not available for FORECASTING - // - // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. - // * Text Sentiment - TextSnippet, content up 500 characters, UTF-8 - // encoded. - rpc Predict(PredictRequest) returns (PredictResponse) { - option (google.api.http) = { - post: "/v1beta1/{name=projects/*/locations/*/models/*}:predict" - body: "*" - }; - option (google.api.method_signature) = "name,payload,params"; - } - - // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch - // prediction result won't be immediately available in the response. Instead, - // a long running operation object is returned. User can poll the operation - // result via [GetOperation][google.longrunning.Operations.GetOperation] - // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is returned in - // the [response][google.longrunning.Operation.response] field. 
- // Available for following ML problems: - // * Image Classification - // * Image Object Detection - // * Video Classification - // * Video Object Tracking * Text Extraction - // * Tables - rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict" - body: "*" - }; - option (google.api.method_signature) = "name,input_config,output_config,params"; - option (google.longrunning.operation_info) = { - response_type: "BatchPredictResult" - metadata_type: "OperationMetadata" - }; - } -} - -// Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. -message PredictRequest { - // Required. Name of the model requested to serve the prediction. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; - - // Required. Payload to perform a prediction on. The payload must match the - // problem type that the model was trained to solve. - ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED]; - - // Additional domain-specific parameters, any string must be up to 25000 - // characters long. - // - // * For Image Classification: - // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for an image, it will only produce results that have - // at least this confidence score. The default is 0.5. - // - // * For Image Object Detection: - // `score_threshold` - (float) When Model detects objects on the image, - // it will only produce bounding boxes which have at least this - // confidence score. Value in 0 to 1 range, default is 0.5. - // `max_bounding_box_count` - (int64) No more than this number of bounding - // boxes will be returned in the response. Default is 100, the - // requested value may be limited by server. - // * For Tables: - // feature_importance - (boolean) Whether feature importance - // should be populated in the returned TablesAnnotation. - // The default is false. - map params = 3; -} - -// Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. -message PredictResponse { - // Prediction result. - // Translation and Text Sentiment will return precisely one payload. - repeated AnnotationPayload payload = 1; - - // The preprocessed example that AutoML actually makes prediction on. - // Empty if AutoML does not preprocess the input example. - // * For Text Extraction: - // If the input is a .pdf file, the OCR'ed text will be provided in - // [document_text][google.cloud.automl.v1beta1.Document.document_text]. - ExamplePayload preprocessed_input = 3; - - // Additional domain-specific prediction response metadata. - // - // * For Image Object Detection: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // image could have been returned. - // - // * For Text Sentiment: - // `sentiment_score` - (float, deprecated) A value between -1 and 1, - // -1 maps to least positive sentiment, while 1 maps to the most positive - // one and the higher the score, the more positive the sentiment in the - // document is. Yet these values are relative to the training data, so - // e.g. if all data was positive then -1 will be also positive (though - // the least). - // The sentiment_score shouldn't be confused with "score" or "magnitude" - // from the previous Natural Language Sentiment Analysis API. 
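The `Predict` call documented above takes free-form string `params`; for example, `score_threshold` controls how confident a result must be before it is returned. A sketch under the 2.0 calling convention (flattened `name`, `payload`, and `params`, per the `method_signature` shown above), assuming an image classification model; the file name and IDs are placeholders:

```py
from google.cloud import automl_v1beta1 as automl

client = automl.PredictionServiceClient()
model_full_id = client.model_path("YOUR_PROJECT_ID", "us-central1", "YOUR_MODEL_ID")

with open("image.jpg", "rb") as f:
    payload = {"image": {"image_bytes": f.read()}}

response = client.predict(
    name=model_full_id,
    payload=payload,
    params={"score_threshold": "0.8"},  # map<string, string>: values are strings
)
for annotation in response.payload:
    print(annotation.display_name, annotation.classification.score)
```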
- map metadata = 2; -} - -// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. -message BatchPredictRequest { - // Required. Name of the model requested to serve the batch prediction. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; - - // Required. The input configuration for batch prediction. - BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Configuration specifying where output predictions should - // be written. - BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED]; - - // Required. Additional domain-specific parameters for the predictions, any string must - // be up to 25000 characters long. - // - // * For Text Classification: - // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for a text snippet, it will only produce results - // that have at least this confidence score. The default is 0.5. - // - // * For Image Classification: - // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for an image, it will only produce results that - // have at least this confidence score. The default is 0.5. - // - // * For Image Object Detection: - // - // `score_threshold` - (float) When Model detects objects on the image, - // it will only produce bounding boxes which have at least this - // confidence score. Value in 0 to 1 range, default is 0.5. - // `max_bounding_box_count` - (int64) No more than this number of bounding - // boxes will be produced per image. Default is 100, the - // requested value may be limited by server. - // - // * For Video Classification : - // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for a video, it will only produce results that - // have at least this confidence score. The default is 0.5. - // `segment_classification` - (boolean) Set to true to request - // segment-level classification. AutoML Video Intelligence returns - // labels and their confidence scores for the entire segment of the - // video that user specified in the request configuration. - // The default is "true". - // `shot_classification` - (boolean) Set to true to request shot-level - // classification. AutoML Video Intelligence determines the boundaries - // for each camera shot in the entire segment of the video that user - // specified in the request configuration. AutoML Video Intelligence - // then returns labels and their confidence scores for each detected - // shot, along with the start and end time of the shot. - // WARNING: Model evaluation is not done for this classification type, - // the quality of it depends on training data, but there are no metrics - // provided to describe that quality. The default is "false". - // `1s_interval_classification` - (boolean) Set to true to request - // classification for a video at one-second intervals. AutoML Video - // Intelligence returns labels and their confidence scores for each - // second of the entire segment of the video that user specified in the - // request configuration. - // WARNING: Model evaluation is not done for this classification - // type, the quality of it depends on training data, but there are no - // metrics provided to describe that quality. The default is - // "false". 
- // - // * For Tables: - // - // feature_importance - (boolean) Whether feature importance - // should be populated in the returned TablesAnnotations. The - // default is false. - // - // * For Video Object Tracking: - // - // `score_threshold` - (float) When Model detects objects on video frames, - // it will only produce bounding boxes which have at least this - // confidence score. Value in 0 to 1 range, default is 0.5. - // `max_bounding_box_count` - (int64) No more than this number of bounding - // boxes will be returned per frame. Default is 100, the requested - // value may be limited by server. - // `min_bounding_box_size` - (float) Only bounding boxes with shortest edge - // at least that long as a relative value of video frame size will be - // returned. Value in 0 to 1 range. Default is 0. - map params = 5 [(google.api.field_behavior) = REQUIRED]; -} - -// Result of the Batch Predict. This message is returned in -// [response][google.longrunning.Operation.response] of the operation returned -// by the [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. -message BatchPredictResult { - // Additional domain-specific prediction response metadata. - // - // * For Image Object Detection: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // image could have been returned. - // - // * For Video Object Tracking: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // frame could have been returned. - map metadata = 1; -} diff --git a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py deleted file mode 100644 index b22759e8..00000000 --- a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py +++ /dev/null @@ -1,906 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
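The `params` map for `BatchPredictRequest` spans the comments above and, unlike online predict, is marked required here. A hedged end-to-end sketch using the flattened 2.0 signature shown earlier in this guide; the bucket paths and IDs are placeholders:

```py
from google.cloud import automl_v1beta1 as automl

client = automl.PredictionServiceClient()
model_full_id = client.model_path("YOUR_PROJECT_ID", "us-central1", "YOUR_MODEL_ID")

operation = client.batch_predict(
    name=model_full_id,
    input_config={"gcs_source": {"input_uris": ["gs://YOUR_BUCKET/input.csv"]}},
    output_config={
        "gcs_destination": {"output_uri_prefix": "gs://YOUR_BUCKET/output/"}
    },
    params={"score_threshold": "0.5"},  # required for v1beta1 batch predict
)
result = operation.result()  # BatchPredictResult, delivered via the LRO response
print(result.metadata)
```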
-# source: google/cloud/automl_v1beta1/proto/prediction_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - annotation_payload_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - data_items_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - operations_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_operations__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/prediction_service.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\026PredictionServiceProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n:google/cloud/automl_v1beta1/proto/prediction_service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a#google/longrunning/operations.proto"\xfe\x01\n\x0ePredictRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x41\n\x07payload\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayloadB\x03\xe0\x41\x02\x12G\n\x06params\x18\x03 \x03(\x0b\x32\x37.google.cloud.automl.v1beta1.PredictRequest.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x02\n\x0fPredictResponse\x12?\n\x07payload\x18\x01 \x03(\x0b\x32..google.cloud.automl.v1beta1.AnnotationPayload\x12G\n\x12preprocessed_input\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayload\x12L\n\x08metadata\x18\x02 \x03(\x0b\x32:.google.cloud.automl.v1beta1.PredictResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xee\x02\n\x13\x42\x61tchPredictRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12O\n\x0cinput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfigB\x03\xe0\x41\x02\x12Q\n\routput_config\x18\x04 
\x01(\x0b\x32\x35.google.cloud.automl.v1beta1.BatchPredictOutputConfigB\x03\xe0\x41\x02\x12Q\n\x06params\x18\x05 \x03(\x0b\x32<.google.cloud.automl.v1beta1.BatchPredictRequest.ParamsEntryB\x03\xe0\x41\x02\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x96\x01\n\x12\x42\x61tchPredictResult\x12O\n\x08metadata\x18\x01 \x03(\x0b\x32=.google.cloud.automl.v1beta1.BatchPredictResult.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x32\x9e\x04\n\x11PredictionService\x12\xbe\x01\n\x07Predict\x12+.google.cloud.automl.v1beta1.PredictRequest\x1a,.google.cloud.automl.v1beta1.PredictResponse"X\x82\xd3\xe4\x93\x02<"7/v1beta1/{name=projects/*/locations/*/models/*}:predict:\x01*\xda\x41\x13name,payload,params\x12\xfc\x01\n\x0c\x42\x61tchPredict\x12\x30.google.cloud.automl.v1beta1.BatchPredictRequest\x1a\x1d.google.longrunning.Operation"\x9a\x01\x82\xd3\xe4\x93\x02\x41" The dataset has - // translation_dataset_metadata. - string filter = 3; - - // Requested page size. Server may return fewer results than requested. - // If unspecified, server will pick a default size. - int32 page_size = 4; - - // A token identifying a page of results for the server to return - // Typically obtained via - // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token] of the previous - // [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets] call. - string page_token = 6; -} - -// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. -message ListDatasetsResponse { - // The datasets read. - repeated Dataset datasets = 1; - - // A token to retrieve next page of results. - // Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsRequest.page_token] to obtain that page. - string next_page_token = 2; -} - -// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] -message UpdateDatasetRequest { - // Required. The dataset which replaces the resource on the server. - Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED]; - - // The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; -} - -// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. -message DeleteDatasetRequest { - // Required. The resource name of the dataset to delete. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Dataset" - } - ]; -} - -// Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. -message ImportDataRequest { - // Required. Dataset name. Dataset must already exist. All imported - // annotations and examples will be added. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Dataset" - } - ]; - - // Required. The desired input location and its domain specific semantics, - // if any. - InputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. -message ExportDataRequest { - // Required. The resource name of the dataset. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Dataset" - } - ]; - - // Required. 
The desired output location. - OutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. -message GetAnnotationSpecRequest { - // Required. The resource name of the annotation spec to retrieve. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/AnnotationSpec" - } - ]; -} - -// Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. -message GetTableSpecRequest { - // Required. The resource name of the table spec to retrieve. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/TableSpec" - } - ]; - - // Mask specifying which fields to read. - google.protobuf.FieldMask field_mask = 2; -} - -// Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. -message ListTableSpecsRequest { - // Required. The resource name of the dataset to list table specs from. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Dataset" - } - ]; - - // Mask specifying which fields to read. - google.protobuf.FieldMask field_mask = 2; - - // Filter expression, see go/filtering. - string filter = 3; - - // Requested page size. The server can return fewer results than requested. - // If unspecified, the server will pick a default size. - int32 page_size = 4; - - // A token identifying a page of results for the server to return. - // Typically obtained from the - // [ListTableSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token] field of the previous - // [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] call. - string page_token = 6; -} - -// Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. -message ListTableSpecsResponse { - // The table specs read. - repeated TableSpec table_specs = 1; - - // A token to retrieve next page of results. - // Pass to [ListTableSpecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token] to obtain that page. - string next_page_token = 2; -} - -// Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] -message UpdateTableSpecRequest { - // Required. The table spec which replaces the resource on the server. - TableSpec table_spec = 1 [(google.api.field_behavior) = REQUIRED]; - - // The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; -} - -// Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. -message GetColumnSpecRequest { - // Required. The resource name of the column spec to retrieve. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/ColumnSpec" - } - ]; - - // Mask specifying which fields to read. - google.protobuf.FieldMask field_mask = 2; -} - -// Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. -message ListColumnSpecsRequest { - // Required. The resource name of the table spec to list column specs from. 
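The `page_size`/`page_token` fields documented above rarely need to be handled by hand: in the 2.0 surface each `List*` method returns a pager that follows `next_page_token` transparently. A minimal sketch for table specs (the IDs are placeholders):

```py
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()
dataset_full_id = client.dataset_path(
    "YOUR_PROJECT_ID", "us-central1", "YOUR_DATASET_ID"
)

# The pager issues follow-up requests as you iterate; no manual paging needed.
for table_spec in client.list_table_specs(parent=dataset_full_id):
    print(table_spec.name)
```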
- string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/TableSpec" - } - ]; - - // Mask specifying which fields to read. - google.protobuf.FieldMask field_mask = 2; - - // Filter expression, see go/filtering. - string filter = 3; - - // Requested page size. The server can return fewer results than requested. - // If unspecified, the server will pick a default size. - int32 page_size = 4; - - // A token identifying a page of results for the server to return. - // Typically obtained from the - // [ListColumnSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token] field of the previous - // [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] call. - string page_token = 6; -} - -// Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. -message ListColumnSpecsResponse { - // The column specs read. - repeated ColumnSpec column_specs = 1; - - // A token to retrieve next page of results. - // Pass to [ListColumnSpecsRequest.page_token][google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token] to obtain that page. - string next_page_token = 2; -} - -// Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] -message UpdateColumnSpecRequest { - // Required. The column spec which replaces the resource on the server. - ColumnSpec column_spec = 1 [(google.api.field_behavior) = REQUIRED]; - - // The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; -} - -// Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. -message CreateModelRequest { - // Required. Resource name of the parent project where the model is being created. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "locations.googleapis.com/Location" - } - ]; - - // Required. The model to create. - Model model = 4 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. -message GetModelRequest { - // Required. Resource name of the model. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; -} - -// Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. -message ListModelsRequest { - // Required. Resource name of the project, from which to list the models. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "locations.googleapis.com/Location" - } - ]; - - // An expression for filtering the results of the request. - // - // * `model_metadata` - for existence of the case (e.g. - // video_classification_model_metadata:*). - // * `dataset_id` - for = or !=. Some examples of using the filter are: - // - // * `image_classification_model_metadata:*` --> The model has - // image_classification_model_metadata. - // * `dataset_id=5` --> The model was created from a dataset with ID 5. - string filter = 3; - - // Requested page size. 
- int32 page_size = 4; - - // A token identifying a page of results for the server to return - // Typically obtained via - // [ListModelsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelsResponse.next_page_token] of the previous - // [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels] call. - string page_token = 6; -} - -// Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. -message ListModelsResponse { - // List of models in the requested page. - repeated Model model = 1; - - // A token to retrieve next page of results. - // Pass to [ListModelsRequest.page_token][google.cloud.automl.v1beta1.ListModelsRequest.page_token] to obtain that page. - string next_page_token = 2; -} - -// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. -message DeleteModelRequest { - // Required. Resource name of the model being deleted. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; -} - -// Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. -message DeployModelRequest { - // The per-domain specific deployment parameters. - oneof model_deployment_metadata { - // Model deployment metadata specific to Image Object Detection. - ImageObjectDetectionModelDeploymentMetadata image_object_detection_model_deployment_metadata = 2; - - // Model deployment metadata specific to Image Classification. - ImageClassificationModelDeploymentMetadata image_classification_model_deployment_metadata = 4; - } - - // Required. Resource name of the model to deploy. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; -} - -// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. -message UndeployModelRequest { - // Required. Resource name of the model to undeploy. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; -} - -// Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. -// Models need to be enabled for exporting, otherwise an error code will be -// returned. -message ExportModelRequest { - // Required. The resource name of the model to export. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; - - // Required. The desired output location and configuration. - ModelExportOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. -message ExportEvaluatedExamplesRequest { - // Required. The resource name of the model whose evaluated examples are to - // be exported. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; - - // Required. The desired output location and configuration. - ExportEvaluatedExamplesOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. -message GetModelEvaluationRequest { - // Required. Resource name for the model evaluation. 
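`DeployModelRequest` above carries its per-domain deployment metadata in a oneof that is not part of the flattened `method_signature`, so under the 2.0 convention it has to travel in a request object. A hedged sketch; the `node_count` value is purely illustrative:

```py
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()
model_full_id = client.model_path("YOUR_PROJECT_ID", "us-central1", "YOUR_MODEL_ID")

# Flattened arguments and `request` are mutually exclusive, so everything,
# including the oneof field, goes into the request mapping.
operation = client.deploy_model(
    request={
        "name": model_full_id,
        "image_classification_model_deployment_metadata": {"node_count": 2},
    }
)
operation.result()
```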
- string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/ModelEvaluation" - } - ]; -} - -// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. -message ListModelEvaluationsRequest { - // Required. Resource name of the model to list the model evaluations for. - // If modelId is set as "-", this will list model evaluations from across all - // models of the parent location. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "automl.googleapis.com/Model" - } - ]; - - // An expression for filtering the results of the request. - // - // * `annotation_spec_id` - for =, != or existence. See example below for - // the last. - // - // Some examples of using the filter are: - // - // * `annotation_spec_id!=4` --> The model evaluation was done for - // annotation spec with ID different than 4. - // * `NOT annotation_spec_id:*` --> The model evaluation was done for - // aggregate of all annotation specs. - string filter = 3; - - // Requested page size. - int32 page_size = 4; - - // A token identifying a page of results for the server to return. - // Typically obtained via - // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token] of the previous - // [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] call. - string page_token = 6; -} - -// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. -message ListModelEvaluationsResponse { - // List of model evaluations in the requested page. - repeated ModelEvaluation model_evaluation = 1; - - // A token to retrieve next page of results. - // Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token] field of a new - // [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] request to obtain that page. - string next_page_token = 2; -} diff --git a/google/cloud/automl_v1beta1/proto/service_pb2.py b/google/cloud/automl_v1beta1/proto/service_pb2.py deleted file mode 100644 index b8c20941..00000000 --- a/google/cloud/automl_v1beta1/proto/service_pb2.py +++ /dev/null @@ -1,3050 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
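The filter grammar documented above (`annotation_spec_id!=4`, `NOT annotation_spec_id:*`) is passed as a plain string. Because the v1beta1 descriptor only flattens `parent` for this method, the request-object form is the safe way to supply `filter`; a sketch:

```py
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()
model_full_id = client.model_path("YOUR_PROJECT_ID", "us-central1", "YOUR_MODEL_ID")

# Only the aggregate evaluation computed across all annotation specs.
response = client.list_model_evaluations(
    request={"parent": model_full_id, "filter": "NOT annotation_spec_id:*"}
)
for evaluation in response:
    print(evaluation.name)
```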
-# source: google/cloud/automl_v1beta1/proto/service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - annotation_payload_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - annotation_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - column_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - dataset_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - model_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - model_evaluation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - operations_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_operations__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - table_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/service.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\013AutoMlProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - 
serialized_pb=b'\n/google/cloud/automl_v1beta1/proto/service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a/google/cloud/automl_v1beta1/proto/dataset.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a\x32google/cloud/automl_v1beta1/proto/table_spec.proto\x1a#google/longrunning/operations.proto\x1a google/protobuf/field_mask.proto"\x8d\x01\n\x14\x43reateDatasetRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12:\n\x07\x64\x61taset\x18\x02 \x01(\x0b\x32$.google.cloud.automl.v1beta1.DatasetB\x03\xe0\x41\x02"H\n\x11GetDatasetRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset"\x87\x01\n\x13ListDatasetsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"g\n\x14ListDatasetsResponse\x12\x36\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32$.google.cloud.automl.v1beta1.Dataset\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x83\x01\n\x14UpdateDatasetRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32$.google.cloud.automl.v1beta1.DatasetB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"K\n\x14\x44\x65leteDatasetRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset"\x8d\x01\n\x11ImportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12\x43\n\x0cinput_config\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigB\x03\xe0\x41\x02"\x8f\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12\x45\n\routput_config\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"|\n\x13GetTableSpecRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x61utoml.googleapis.com/TableSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb5\x01\n\x15ListTableSpecsRequest\x12\x35\n\x06parent\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"n\n\x16ListTableSpecsResponse\x12;\n\x0btable_specs\x18\x01 \x03(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8a\x01\n\x16UpdateTableSpecRequest\x12?\n\ntable_spec\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.TableSpecB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"~\n\x14GetColumnSpecRequest\x12\x36\n\x04name\x18\x01 
\x01(\tB(\xe0\x41\x02\xfa\x41"\n automl.googleapis.com/ColumnSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb8\x01\n\x16ListColumnSpecsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x61utoml.googleapis.com/TableSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"q\n\x17ListColumnSpecsResponse\x12=\n\x0c\x63olumn_specs\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8d\x01\n\x17UpdateColumnSpecRequest\x12\x41\n\x0b\x63olumn_spec\x18\x01 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpecB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x87\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x36\n\x05model\x18\x04 \x01(\x0b\x32".google.cloud.automl.v1beta1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"`\n\x12ListModelsResponse\x12\x31\n\x05model\x18\x01 \x03(\x0b\x32".google.cloud.automl.v1beta1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\xef\x02\n\x12\x44\x65ployModelRequest\x12\x84\x01\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32H.google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12\x81\x01\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32G.google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x99\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12P\n\routput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.ModelExportOutputConfigB\x03\xe0\x41\x02"\xb1\x01\n\x1e\x45xportEvaluatedExamplesRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\\\n\routput_config\x18\x03 \x01(\x0b\x32@.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x89\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"\x7f\n\x1cListModelEvaluationsResponse\x12\x46\n\x10model_evaluation\x18\x01 \x03(\x0b\x32,.google.cloud.automl.v1beta1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xed\'\n\x06\x41utoMl\x12\xbd\x01\n\rCreateDataset\x12\x31.google.cloud.automl.v1beta1.CreateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"S\x82\xd3\xe4\x93\x02<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\x12\xa4\x01\n\nGetDataset\x12..google.cloud.automl.v1beta1.GetDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"@\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xb7\x01\n\x0cListDatasets\x12\x30.google.cloud.automl.v1beta1.ListDatasetsRequest\x1a\x31.google.cloud.automl.v1beta1.ListDatasetsResponse"B\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbe\x01\n\rUpdateDataset\x12\x31.google.cloud.automl.v1beta1.UpdateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"T\x82\xd3\xe4\x93\x02\x44\x32\x39/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x07\x64\x61taset\x12\xd0\x01\n\rDeleteDataset\x12\x31.google.cloud.automl.v1beta1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x33*1/v1beta1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe6\x01\n\nImportData\x12..google.cloud.automl.v1beta1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x88\x01\x82\xd3\xe4\x93\x02\x41"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\xda\x41\x04name\x12\xca\x01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"O\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\xda\x41\x06parent\x12\xda\x01\n\x0fUpdateTableSpec\x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"j\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\xda\x41\ntable_spec\x12\xc8\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"[\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\xda\x41\x04name\x12\xdb\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"]\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\xda\x41\x06parent\x12\xee\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"{\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\xda\x41\x0b\x63olumn_spec\x12\xc9\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"j\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x9c\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model">\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xaf\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\xda\x41\x06parent
\x12\xca\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x31*//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd4\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"u\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xda\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"w\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe3\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\x8c\x02\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"\x94\x01\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xcd\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"Q\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xe0\x01\n\x14ListModelEvaluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"S\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\x06parent\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_operations__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - ], -) - - -_CREATEDATASETREQUEST 
= _descriptor.Descriptor( - name="CreateDatasetRequest", - full_name="google.cloud.automl.v1beta1.CreateDatasetRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.CreateDatasetRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="dataset", - full_name="google.cloud.automl.v1beta1.CreateDatasetRequest.dataset", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=786, - serialized_end=927, -) - - -_GETDATASETREQUEST = _descriptor.Descriptor( - name="GetDatasetRequest", - full_name="google.cloud.automl.v1beta1.GetDatasetRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.GetDatasetRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=929, - serialized_end=1001, -) - - -_LISTDATASETSREQUEST = _descriptor.Descriptor( - name="ListDatasetsRequest", - full_name="google.cloud.automl.v1beta1.ListDatasetsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.filter", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.automl.v1beta1.ListDatasetsRequest.page_token", - index=3, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1004, - serialized_end=1139, -) - - -_LISTDATASETSRESPONSE = _descriptor.Descriptor( - name="ListDatasetsResponse", - full_name="google.cloud.automl.v1beta1.ListDatasetsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="datasets", - full_name="google.cloud.automl.v1beta1.ListDatasetsResponse.datasets", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1141, - serialized_end=1244, -) - - -_UPDATEDATASETREQUEST = _descriptor.Descriptor( - name="UpdateDatasetRequest", - full_name="google.cloud.automl.v1beta1.UpdateDatasetRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="dataset", - full_name="google.cloud.automl.v1beta1.UpdateDatasetRequest.dataset", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.automl.v1beta1.UpdateDatasetRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1247, - serialized_end=1378, -) - - -_DELETEDATASETREQUEST = _descriptor.Descriptor( - name="DeleteDatasetRequest", - full_name="google.cloud.automl.v1beta1.DeleteDatasetRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.DeleteDatasetRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1380, - serialized_end=1455, -) - - -_IMPORTDATAREQUEST = _descriptor.Descriptor( - name="ImportDataRequest", - full_name="google.cloud.automl.v1beta1.ImportDataRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.ImportDataRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="input_config", - full_name="google.cloud.automl.v1beta1.ImportDataRequest.input_config", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1458, - serialized_end=1599, -) - - -_EXPORTDATAREQUEST = _descriptor.Descriptor( - name="ExportDataRequest", - full_name="google.cloud.automl.v1beta1.ExportDataRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.ExportDataRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="output_config", - 
full_name="google.cloud.automl.v1beta1.ExportDataRequest.output_config", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1602, - serialized_end=1745, -) - - -_GETANNOTATIONSPECREQUEST = _descriptor.Descriptor( - name="GetAnnotationSpecRequest", - full_name="google.cloud.automl.v1beta1.GetAnnotationSpecRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.GetAnnotationSpecRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A&\n$automl.googleapis.com/AnnotationSpec", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1747, - serialized_end=1833, -) - - -_GETTABLESPECREQUEST = _descriptor.Descriptor( - name="GetTableSpecRequest", - full_name="google.cloud.automl.v1beta1.GetTableSpecRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.GetTableSpecRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.cloud.automl.v1beta1.GetTableSpecRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1835, - serialized_end=1959, -) - - -_LISTTABLESPECSREQUEST = _descriptor.Descriptor( - name="ListTableSpecsRequest", - full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.filter", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.page_size", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token", - index=4, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1962, - serialized_end=2143, -) - - -_LISTTABLESPECSRESPONSE = _descriptor.Descriptor( - name="ListTableSpecsResponse", - full_name="google.cloud.automl.v1beta1.ListTableSpecsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_specs", - full_name="google.cloud.automl.v1beta1.ListTableSpecsResponse.table_specs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=2145, - serialized_end=2255, -) - - -_UPDATETABLESPECREQUEST = _descriptor.Descriptor( - name="UpdateTableSpecRequest", - full_name="google.cloud.automl.v1beta1.UpdateTableSpecRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_spec", - full_name="google.cloud.automl.v1beta1.UpdateTableSpecRequest.table_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.automl.v1beta1.UpdateTableSpecRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2258, - serialized_end=2396, -) - - -_GETCOLUMNSPECREQUEST = _descriptor.Descriptor( - name="GetColumnSpecRequest", - full_name="google.cloud.automl.v1beta1.GetColumnSpecRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.GetColumnSpecRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n automl.googleapis.com/ColumnSpec', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.cloud.automl.v1beta1.GetColumnSpecRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2398, - serialized_end=2524, -) - - -_LISTCOLUMNSPECSREQUEST = _descriptor.Descriptor( - name="ListColumnSpecsRequest", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.filter", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_size", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token", - index=4, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2527, - serialized_end=2711, -) - - -_LISTCOLUMNSPECSRESPONSE = _descriptor.Descriptor( - name="ListColumnSpecsResponse", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="column_specs", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsResponse.column_specs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2713, - serialized_end=2826, 
-) - - -_UPDATECOLUMNSPECREQUEST = _descriptor.Descriptor( - name="UpdateColumnSpecRequest", - full_name="google.cloud.automl.v1beta1.UpdateColumnSpecRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="column_spec", - full_name="google.cloud.automl.v1beta1.UpdateColumnSpecRequest.column_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.automl.v1beta1.UpdateColumnSpecRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2829, - serialized_end=2970, -) - - -_CREATEMODELREQUEST = _descriptor.Descriptor( - name="CreateModelRequest", - full_name="google.cloud.automl.v1beta1.CreateModelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.CreateModelRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="model", - full_name="google.cloud.automl.v1beta1.CreateModelRequest.model", - index=1, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2973, - serialized_end=3108, -) - - -_GETMODELREQUEST = _descriptor.Descriptor( - name="GetModelRequest", - full_name="google.cloud.automl.v1beta1.GetModelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.GetModelRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - 
extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3110, - serialized_end=3178, -) - - -_LISTMODELSREQUEST = _descriptor.Descriptor( - name="ListModelsRequest", - full_name="google.cloud.automl.v1beta1.ListModelsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.ListModelsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.automl.v1beta1.ListModelsRequest.filter", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.automl.v1beta1.ListModelsRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.automl.v1beta1.ListModelsRequest.page_token", - index=3, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3181, - serialized_end=3314, -) - - -_LISTMODELSRESPONSE = _descriptor.Descriptor( - name="ListModelsResponse", - full_name="google.cloud.automl.v1beta1.ListModelsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="model", - full_name="google.cloud.automl.v1beta1.ListModelsResponse.model", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.automl.v1beta1.ListModelsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3316, - serialized_end=3412, -) - - -_DELETEMODELREQUEST = _descriptor.Descriptor( - name="DeleteModelRequest", - full_name="google.cloud.automl.v1beta1.DeleteModelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.DeleteModelRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3414, - serialized_end=3485, -) - - -_DEPLOYMODELREQUEST = _descriptor.Descriptor( - name="DeployModelRequest", - full_name="google.cloud.automl.v1beta1.DeployModelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="image_object_detection_model_deployment_metadata", - full_name="google.cloud.automl.v1beta1.DeployModelRequest.image_object_detection_model_deployment_metadata", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_classification_model_deployment_metadata", - full_name="google.cloud.automl.v1beta1.DeployModelRequest.image_classification_model_deployment_metadata", - index=1, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.DeployModelRequest.name", - index=2, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="model_deployment_metadata", - full_name="google.cloud.automl.v1beta1.DeployModelRequest.model_deployment_metadata", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=3488, - serialized_end=3855, -) 
- - -_UNDEPLOYMODELREQUEST = _descriptor.Descriptor( - name="UndeployModelRequest", - full_name="google.cloud.automl.v1beta1.UndeployModelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.UndeployModelRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3857, - serialized_end=3930, -) - - -_EXPORTMODELREQUEST = _descriptor.Descriptor( - name="ExportModelRequest", - full_name="google.cloud.automl.v1beta1.ExportModelRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.ExportModelRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="output_config", - full_name="google.cloud.automl.v1beta1.ExportModelRequest.output_config", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3933, - serialized_end=4086, -) - - -_EXPORTEVALUATEDEXAMPLESREQUEST = _descriptor.Descriptor( - name="ExportEvaluatedExamplesRequest", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="output_config", - full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest.output_config", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4089, - serialized_end=4266, -) - - -_GETMODELEVALUATIONREQUEST = _descriptor.Descriptor( - name="GetModelEvaluationRequest", - full_name="google.cloud.automl.v1beta1.GetModelEvaluationRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.GetModelEvaluationRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A'\n%automl.googleapis.com/ModelEvaluation", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4268, - serialized_end=4356, -) - - -_LISTMODELEVALUATIONSREQUEST = _descriptor.Descriptor( - name="ListModelEvaluationsRequest", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.filter", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token", - index=3, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4359, - serialized_end=4496, -) - - -_LISTMODELEVALUATIONSRESPONSE = _descriptor.Descriptor( - name="ListModelEvaluationsResponse", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="model_evaluation", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsResponse.model_evaluation", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4498, - serialized_end=4625, -) - -_CREATEDATASETREQUEST.fields_by_name[ - "dataset" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET -) -_LISTDATASETSRESPONSE.fields_by_name[ - "datasets" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET -) -_UPDATEDATASETREQUEST.fields_by_name[ - "dataset" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET -) -_UPDATEDATASETREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_IMPORTDATAREQUEST.fields_by_name[ - "input_config" -].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._INPUTCONFIG -_EXPORTDATAREQUEST.fields_by_name[ - "output_config" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._OUTPUTCONFIG -) -_GETTABLESPECREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTTABLESPECSREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTTABLESPECSRESPONSE.fields_by_name[ - "table_specs" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC -) -_UPDATETABLESPECREQUEST.fields_by_name[ - "table_spec" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC -) -_UPDATETABLESPECREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_GETCOLUMNSPECREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTCOLUMNSPECSREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTCOLUMNSPECSRESPONSE.fields_by_name[ - "column_specs" -].message_type = ( - 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC -) -_UPDATECOLUMNSPECREQUEST.fields_by_name[ - "column_spec" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC -) -_UPDATECOLUMNSPECREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATEMODELREQUEST.fields_by_name[ - "model" -].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL -_LISTMODELSRESPONSE.fields_by_name[ - "model" -].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL -_DEPLOYMODELREQUEST.fields_by_name[ - "image_object_detection_model_deployment_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA -) -_DEPLOYMODELREQUEST.fields_by_name[ - "image_classification_model_deployment_metadata" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2._IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA -) -_DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"].fields.append( - _DEPLOYMODELREQUEST.fields_by_name[ - "image_object_detection_model_deployment_metadata" - ] -) -_DEPLOYMODELREQUEST.fields_by_name[ - "image_object_detection_model_deployment_metadata" -].containing_oneof = _DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"] -_DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"].fields.append( - _DEPLOYMODELREQUEST.fields_by_name["image_classification_model_deployment_metadata"] -) -_DEPLOYMODELREQUEST.fields_by_name[ - "image_classification_model_deployment_metadata" -].containing_oneof = _DEPLOYMODELREQUEST.oneofs_by_name["model_deployment_metadata"] -_EXPORTMODELREQUEST.fields_by_name[ - "output_config" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._MODELEXPORTOUTPUTCONFIG -) -_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name[ - "output_config" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG -) -_LISTMODELEVALUATIONSRESPONSE.fields_by_name[ - "model_evaluation" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION -) -DESCRIPTOR.message_types_by_name["CreateDatasetRequest"] = _CREATEDATASETREQUEST -DESCRIPTOR.message_types_by_name["GetDatasetRequest"] = _GETDATASETREQUEST -DESCRIPTOR.message_types_by_name["ListDatasetsRequest"] = _LISTDATASETSREQUEST -DESCRIPTOR.message_types_by_name["ListDatasetsResponse"] = _LISTDATASETSRESPONSE -DESCRIPTOR.message_types_by_name["UpdateDatasetRequest"] = _UPDATEDATASETREQUEST -DESCRIPTOR.message_types_by_name["DeleteDatasetRequest"] = _DELETEDATASETREQUEST -DESCRIPTOR.message_types_by_name["ImportDataRequest"] = _IMPORTDATAREQUEST -DESCRIPTOR.message_types_by_name["ExportDataRequest"] = _EXPORTDATAREQUEST -DESCRIPTOR.message_types_by_name["GetAnnotationSpecRequest"] = _GETANNOTATIONSPECREQUEST -DESCRIPTOR.message_types_by_name["GetTableSpecRequest"] = _GETTABLESPECREQUEST -DESCRIPTOR.message_types_by_name["ListTableSpecsRequest"] = _LISTTABLESPECSREQUEST -DESCRIPTOR.message_types_by_name["ListTableSpecsResponse"] = _LISTTABLESPECSRESPONSE -DESCRIPTOR.message_types_by_name["UpdateTableSpecRequest"] = _UPDATETABLESPECREQUEST -DESCRIPTOR.message_types_by_name["GetColumnSpecRequest"] = _GETCOLUMNSPECREQUEST -DESCRIPTOR.message_types_by_name["ListColumnSpecsRequest"] = _LISTCOLUMNSPECSREQUEST 
-DESCRIPTOR.message_types_by_name["ListColumnSpecsResponse"] = _LISTCOLUMNSPECSRESPONSE -DESCRIPTOR.message_types_by_name["UpdateColumnSpecRequest"] = _UPDATECOLUMNSPECREQUEST -DESCRIPTOR.message_types_by_name["CreateModelRequest"] = _CREATEMODELREQUEST -DESCRIPTOR.message_types_by_name["GetModelRequest"] = _GETMODELREQUEST -DESCRIPTOR.message_types_by_name["ListModelsRequest"] = _LISTMODELSREQUEST -DESCRIPTOR.message_types_by_name["ListModelsResponse"] = _LISTMODELSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteModelRequest"] = _DELETEMODELREQUEST -DESCRIPTOR.message_types_by_name["DeployModelRequest"] = _DEPLOYMODELREQUEST -DESCRIPTOR.message_types_by_name["UndeployModelRequest"] = _UNDEPLOYMODELREQUEST -DESCRIPTOR.message_types_by_name["ExportModelRequest"] = _EXPORTMODELREQUEST -DESCRIPTOR.message_types_by_name[ - "ExportEvaluatedExamplesRequest" -] = _EXPORTEVALUATEDEXAMPLESREQUEST -DESCRIPTOR.message_types_by_name[ - "GetModelEvaluationRequest" -] = _GETMODELEVALUATIONREQUEST -DESCRIPTOR.message_types_by_name[ - "ListModelEvaluationsRequest" -] = _LISTMODELEVALUATIONSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListModelEvaluationsResponse" -] = _LISTMODELEVALUATIONSRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateDatasetRequest = _reflection.GeneratedProtocolMessageType( - "CreateDatasetRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEDATASETREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1 - .AutoMl.CreateDataset]. - - Attributes: - parent: - Required. The resource name of the project to create the - dataset for. - dataset: - Required. The dataset to create. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateDatasetRequest) - }, -) -_sym_db.RegisterMessage(CreateDatasetRequest) - -GetDatasetRequest = _reflection.GeneratedProtocolMessageType( - "GetDatasetRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETDATASETREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - - Attributes: - name: - Required. The resource name of the dataset to retrieve. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetDatasetRequest) - }, -) -_sym_db.RegisterMessage(GetDatasetRequest) - -ListDatasetsRequest = _reflection.GeneratedProtocolMessageType( - "ListDatasetsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATASETSREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1. - AutoMl.ListDatasets]. - - Attributes: - parent: - Required. The resource name of the project from which to list - datasets. - filter: - An expression for filtering the results of the request. - - ``dataset_metadata`` - for existence of the case ( - e.g. ``image_classification_dataset_metadata``). Some examples - of using the filter are: - - ``translation_dataset_metadata:*`` –> The dataset has - translation_dataset_metadata. - page_size: - Requested page size. Server may return fewer results than - requested. If unspecified, server will pick a default size. 
- page_token: - A token identifying a page of results for the server to return - Typically obtained via [ListDatasetsResponse.next_page_token][ - google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_tok - en] of the previous [AutoMl.ListDatasets][google.cloud.automl. - v1beta1.AutoMl.ListDatasets] call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsRequest) - }, -) -_sym_db.RegisterMessage(ListDatasetsRequest) - -ListDatasetsResponse = _reflection.GeneratedProtocolMessageType( - "ListDatasetsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATASETSRESPONSE, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1 - .AutoMl.ListDatasets]. - - Attributes: - datasets: - The datasets read. - next_page_token: - A token to retrieve next page of results. Pass to [ListDataset - sRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsR - equest.page_token] to obtain that page. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsResponse) - }, -) -_sym_db.RegisterMessage(ListDatasetsResponse) - -UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType( - "UpdateDatasetRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEDATASETREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1 - .AutoMl.UpdateDataset] - - Attributes: - dataset: - Required. The dataset which replaces the resource on the - server. - update_mask: - The update mask applies to the resource. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateDatasetRequest) - }, -) -_sym_db.RegisterMessage(UpdateDatasetRequest) - -DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType( - "DeleteDatasetRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEDATASETREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1 - .AutoMl.DeleteDataset]. - - Attributes: - name: - Required. The resource name of the dataset to delete. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteDatasetRequest) - }, -) -_sym_db.RegisterMessage(DeleteDatasetRequest) - -ImportDataRequest = _reflection.GeneratedProtocolMessageType( - "ImportDataRequest", - (_message.Message,), - { - "DESCRIPTOR": _IMPORTDATAREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - - Attributes: - name: - Required. Dataset name. Dataset must already exist. All - imported annotations and examples will be added. - input_config: - Required. The desired input location and its domain specific - semantics, if any. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataRequest) - }, -) -_sym_db.RegisterMessage(ImportDataRequest) - -ExportDataRequest = _reflection.GeneratedProtocolMessageType( - "ExportDataRequest", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTDATAREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - - Attributes: - name: - Required. The resource name of the dataset. - output_config: - Required. 
The desired output location. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataRequest) - }, -) -_sym_db.RegisterMessage(ExportDataRequest) - -GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType( - "GetAnnotationSpecRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1b - eta1.AutoMl.GetAnnotationSpec]. - - Attributes: - name: - Required. The resource name of the annotation spec to - retrieve. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetAnnotationSpecRequest) - }, -) -_sym_db.RegisterMessage(GetAnnotationSpecRequest) - -GetTableSpecRequest = _reflection.GeneratedProtocolMessageType( - "GetTableSpecRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETTABLESPECREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1. - AutoMl.GetTableSpec]. - - Attributes: - name: - Required. The resource name of the table spec to retrieve. - field_mask: - Mask specifying which fields to read. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetTableSpecRequest) - }, -) -_sym_db.RegisterMessage(GetTableSpecRequest) - -ListTableSpecsRequest = _reflection.GeneratedProtocolMessageType( - "ListTableSpecsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESPECSREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta - 1.AutoMl.ListTableSpecs]. - - Attributes: - parent: - Required. The resource name of the dataset to list table specs - from. - field_mask: - Mask specifying which fields to read. - filter: - Filter expression, see go/filtering. - page_size: - Requested page size. The server can return fewer results than - requested. If unspecified, the server will pick a default - size. - page_token: - A token identifying a page of results for the server to - return. Typically obtained from the [ListTableSpecsResponse.ne - xt_page_token][google.cloud.automl.v1beta1.ListTableSpecsRespo - nse.next_page_token] field of the previous [AutoMl.ListTableSp - ecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsRequest) - }, -) -_sym_db.RegisterMessage(ListTableSpecsRequest) - -ListTableSpecsResponse = _reflection.GeneratedProtocolMessageType( - "ListTableSpecsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESPECSRESPONSE, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1bet - a1.AutoMl.ListTableSpecs]. - - Attributes: - table_specs: - The table specs read. - next_page_token: - A token to retrieve next page of results. Pass to [ListTableSp - ecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSp - ecsRequest.page_token] to obtain that page. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsResponse) - }, -) -_sym_db.RegisterMessage(ListTableSpecsResponse) - -UpdateTableSpecRequest = _reflection.GeneratedProtocolMessageType( - "UpdateTableSpecRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATETABLESPECREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1bet - a1.AutoMl.UpdateTableSpec] - - Attributes: - table_spec: - Required. The table spec which replaces the resource on the - server. - update_mask: - The update mask applies to the resource. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateTableSpecRequest) - }, -) -_sym_db.RegisterMessage(UpdateTableSpecRequest) - -GetColumnSpecRequest = _reflection.GeneratedProtocolMessageType( - "GetColumnSpecRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCOLUMNSPECREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1 - .AutoMl.GetColumnSpec]. - - Attributes: - name: - Required. The resource name of the column spec to retrieve. - field_mask: - Mask specifying which fields to read. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetColumnSpecRequest) - }, -) -_sym_db.RegisterMessage(GetColumnSpecRequest) - -ListColumnSpecsRequest = _reflection.GeneratedProtocolMessageType( - "ListColumnSpecsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCOLUMNSPECSREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1bet - a1.AutoMl.ListColumnSpecs]. - - Attributes: - parent: - Required. The resource name of the table spec to list column - specs from. - field_mask: - Mask specifying which fields to read. - filter: - Filter expression, see go/filtering. - page_size: - Requested page size. The server can return fewer results than - requested. If unspecified, the server will pick a default - size. - page_token: - A token identifying a page of results for the server to - return. Typically obtained from the [ListColumnSpecsResponse.n - ext_page_token][google.cloud.automl.v1beta1.ListColumnSpecsRes - ponse.next_page_token] field of the previous [AutoMl.ListColum - nSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] - call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsRequest) - }, -) -_sym_db.RegisterMessage(ListColumnSpecsRequest) - -ListColumnSpecsResponse = _reflection.GeneratedProtocolMessageType( - "ListColumnSpecsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCOLUMNSPECSRESPONSE, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1be - ta1.AutoMl.ListColumnSpecs]. - - Attributes: - column_specs: - The column specs read. - next_page_token: - A token to retrieve next page of results. Pass to [ListColumnS - pecsRequest.page_token][google.cloud.automl.v1beta1.ListColumn - SpecsRequest.page_token] to obtain that page. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsResponse) - }, -) -_sym_db.RegisterMessage(ListColumnSpecsResponse) - -UpdateColumnSpecRequest = _reflection.GeneratedProtocolMessageType( - "UpdateColumnSpecRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECOLUMNSPECREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1be - ta1.AutoMl.UpdateColumnSpec] - - Attributes: - column_spec: - Required. The column spec which replaces the resource on the - server. - update_mask: - The update mask applies to the resource. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateColumnSpecRequest) - }, -) -_sym_db.RegisterMessage(UpdateColumnSpecRequest) - -CreateModelRequest = _reflection.GeneratedProtocolMessageType( - "CreateModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEMODELREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - - Attributes: - parent: - Required. Resource name of the parent project where the model - is being created. - model: - Required. The model to create. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelRequest) - }, -) -_sym_db.RegisterMessage(CreateModelRequest) - -GetModelRequest = _reflection.GeneratedProtocolMessageType( - "GetModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETMODELREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - - Attributes: - name: - Required. Resource name of the model. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelRequest) - }, -) -_sym_db.RegisterMessage(GetModelRequest) - -ListModelsRequest = _reflection.GeneratedProtocolMessageType( - "ListModelsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELSREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Attributes: - parent: - Required. Resource name of the project, from which to list the - models. - filter: - An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case ( - e.g. ``video_classification_model_metadata:*``). - ``dataset_id`` - - for = or !=. Some examples of using the filter are: - - ``image_classification_model_metadata:*`` –> The model has - image_classification_model_metadata. - ``dataset_id=5`` –> - The model was created from a dataset with ID 5. - page_size: - Requested page size. - page_token: - A token identifying a page of results for the server to return - Typically obtained via [ListModelsResponse.next_page_token][go - ogle.cloud.automl.v1beta1.ListModelsResponse.next_page_token] - of the previous [AutoMl.ListModels][google.cloud.automl.v1beta - 1.AutoMl.ListModels] call. 
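As a hedged illustration of the `filter` syntax described above (reusing the hypothetical `stub` from the pagination sketch; the project ID is a placeholder):

```py
request = service_pb2.ListModelsRequest(
    parent="projects/my-project/locations/us-central1",
    # Keep only models that carry image classification metadata.
    filter="image_classification_model_metadata:*",
)
# ListModelsResponse exposes the page of results on its `model` field.
for model in stub.ListModels(request).model:
    print(model.name)
```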
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsRequest) - }, -) -_sym_db.RegisterMessage(ListModelsRequest) - -ListModelsResponse = _reflection.GeneratedProtocolMessageType( - "ListModelsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELSRESPONSE, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Attributes: - model: - List of models in the requested page. - next_page_token: - A token to retrieve next page of results. Pass to [ListModelsR - equest.page_token][google.cloud.automl.v1beta1.ListModelsReque - st.page_token] to obtain that page. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsResponse) - }, -) -_sym_db.RegisterMessage(ListModelsResponse) - -DeleteModelRequest = _reflection.GeneratedProtocolMessageType( - "DeleteModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEMODELREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - - Attributes: - name: - Required. Resource name of the model being deleted. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteModelRequest) - }, -) -_sym_db.RegisterMessage(DeleteModelRequest) - -DeployModelRequest = _reflection.GeneratedProtocolMessageType( - "DeployModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _DEPLOYMODELREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - - Attributes: - model_deployment_metadata: - The per-domain specific deployment parameters. - image_object_detection_model_deployment_metadata: - Model deployment metadata specific to Image Object Detection. - image_classification_model_deployment_metadata: - Model deployment metadata specific to Image Classification. - name: - Required. Resource name of the model to deploy. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelRequest) - }, -) -_sym_db.RegisterMessage(DeployModelRequest) - -UndeployModelRequest = _reflection.GeneratedProtocolMessageType( - "UndeployModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _UNDEPLOYMODELREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1 - .AutoMl.UndeployModel]. - - Attributes: - name: - Required. Resource name of the model to undeploy. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelRequest) - }, -) -_sym_db.RegisterMessage(UndeployModelRequest) - -ExportModelRequest = _reflection.GeneratedProtocolMessageType( - "ExportModelRequest", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTMODELREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an error code will - be returned. - - Attributes: - name: - Required. The resource name of the model to export. - output_config: - Required. The desired output location and configuration. 
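For example, exporting an export-able model to Cloud Storage might look as follows. This is a sketch that assumes the `ModelExportOutputConfig` and `GcsDestination` messages from the library's `io_pb2` module and reuses the hypothetical `stub` from above; the model name and bucket URI are placeholders:

```py
from google.cloud.automl_v1beta1.proto import io_pb2

request = service_pb2.ExportModelRequest(
    name="projects/my-project/locations/us-central1/models/MODEL_ID",
    output_config=io_pb2.ModelExportOutputConfig(
        gcs_destination=io_pb2.GcsDestination(
            output_uri_prefix="gs://my-bucket/model-export/"
        )
    ),
)
# Returns a google.longrunning.Operation that completes with an empty response.
operation = stub.ExportModel(request)
```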
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelRequest) - }, -) -_sym_db.RegisterMessage(ExportModelRequest) - -ExportEvaluatedExamplesRequest = _reflection.GeneratedProtocolMessageType( - "ExportEvaluatedExamplesRequest", - (_message.Message,), - { - "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.auto - ml.v1beta1.AutoMl.ExportEvaluatedExamples]. - - Attributes: - name: - Required. The resource name of the model whose evaluated - examples are to be exported. - output_config: - Required. The desired output location and configuration. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest) - }, -) -_sym_db.RegisterMessage(ExportEvaluatedExamplesRequest) - -GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType( - "GetModelEvaluationRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 - beta1.AutoMl.GetModelEvaluation]. - - Attributes: - name: - Required. Resource name for the model evaluation. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelEvaluationRequest) - }, -) -_sym_db.RegisterMessage(GetModelEvaluationRequest) - -ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType( - "ListModelEvaluationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. - v1beta1.AutoMl.ListModelEvaluations]. - - Attributes: - parent: - Required. Resource name of the model to list the model - evaluations for. If modelId is set as “-”, this will list - model evaluations from across all models of the parent - location. - filter: - An expression for filtering the results of the request. - - ``annotation_spec_id`` - for =, != or existence. See example - below for the last. Some examples of using the filter are: - - ``annotation_spec_id!=4`` –> The model evaluation was done - for annotation spec with ID different than 4. - ``NOT - annotation_spec_id:*`` –> The model evaluation was done for - aggregate of all annotation specs. - page_size: - Requested page size. - page_token: - A token identifying a page of results for the server to - return. Typically obtained via [ListModelEvaluationsResponse.n - ext_page_token][google.cloud.automl.v1beta1.ListModelEvaluatio - nsResponse.next_page_token] of the previous [AutoMl.ListModelE - valuations][google.cloud.automl.v1beta1.AutoMl.ListModelEvalua - tions] call. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsRequest) - }, -) -_sym_db.RegisterMessage(ListModelEvaluationsRequest) - -ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType( - "ListModelEvaluationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, - "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl - .v1beta1.AutoMl.ListModelEvaluations]. - - Attributes: - model_evaluation: - List of model evaluations in the requested page. 
- next_page_token: - A token to retrieve next page of results. Pass to the [ListMod - elEvaluationsRequest.page_token][google.cloud.automl.v1beta1.L - istModelEvaluationsRequest.page_token] field of a new [AutoMl. - ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListM - odelEvaluations] request to obtain that page. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsResponse) - }, -) -_sym_db.RegisterMessage(ListModelEvaluationsResponse) - - -DESCRIPTOR._options = None -_CREATEDATASETREQUEST.fields_by_name["parent"]._options = None -_CREATEDATASETREQUEST.fields_by_name["dataset"]._options = None -_GETDATASETREQUEST.fields_by_name["name"]._options = None -_LISTDATASETSREQUEST.fields_by_name["parent"]._options = None -_UPDATEDATASETREQUEST.fields_by_name["dataset"]._options = None -_DELETEDATASETREQUEST.fields_by_name["name"]._options = None -_IMPORTDATAREQUEST.fields_by_name["name"]._options = None -_IMPORTDATAREQUEST.fields_by_name["input_config"]._options = None -_EXPORTDATAREQUEST.fields_by_name["name"]._options = None -_EXPORTDATAREQUEST.fields_by_name["output_config"]._options = None -_GETANNOTATIONSPECREQUEST.fields_by_name["name"]._options = None -_GETTABLESPECREQUEST.fields_by_name["name"]._options = None -_LISTTABLESPECSREQUEST.fields_by_name["parent"]._options = None -_UPDATETABLESPECREQUEST.fields_by_name["table_spec"]._options = None -_GETCOLUMNSPECREQUEST.fields_by_name["name"]._options = None -_LISTCOLUMNSPECSREQUEST.fields_by_name["parent"]._options = None -_UPDATECOLUMNSPECREQUEST.fields_by_name["column_spec"]._options = None -_CREATEMODELREQUEST.fields_by_name["parent"]._options = None -_CREATEMODELREQUEST.fields_by_name["model"]._options = None -_GETMODELREQUEST.fields_by_name["name"]._options = None -_LISTMODELSREQUEST.fields_by_name["parent"]._options = None -_DELETEMODELREQUEST.fields_by_name["name"]._options = None -_DEPLOYMODELREQUEST.fields_by_name["name"]._options = None -_UNDEPLOYMODELREQUEST.fields_by_name["name"]._options = None -_EXPORTMODELREQUEST.fields_by_name["name"]._options = None -_EXPORTMODELREQUEST.fields_by_name["output_config"]._options = None -_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name["name"]._options = None -_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name["output_config"]._options = None -_GETMODELEVALUATIONREQUEST.fields_by_name["name"]._options = None -_LISTMODELEVALUATIONSREQUEST.fields_by_name["parent"]._options = None - -_AUTOML = _descriptor.ServiceDescriptor( - name="AutoMl", - full_name="google.cloud.automl.v1beta1.AutoMl", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=4628, - serialized_end=9729, - methods=[ - _descriptor.MethodDescriptor( - name="CreateDataset", - full_name="google.cloud.automl.v1beta1.AutoMl.CreateDataset", - index=0, - containing_service=None, - input_type=_CREATEDATASETREQUEST, - output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=b'\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetDataset", - full_name="google.cloud.automl.v1beta1.AutoMl.GetDataset", - index=1, - containing_service=None, - input_type=_GETDATASETREQUEST, - 
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET,
-            serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="ListDatasets",
-            full_name="google.cloud.automl.v1beta1.AutoMl.ListDatasets",
-            index=2,
-            containing_service=None,
-            input_type=_LISTDATASETSREQUEST,
-            output_type=_LISTDATASETSRESPONSE,
-            serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets\332A\006parent",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="UpdateDataset",
-            full_name="google.cloud.automl.v1beta1.AutoMl.UpdateDataset",
-            index=3,
-            containing_service=None,
-            input_type=_UPDATEDATASETREQUEST,
-            output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET,
-            serialized_options=b"\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\007dataset",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="DeleteDataset",
-            full_name="google.cloud.automl.v1beta1.AutoMl.DeleteDataset",
-            index=4,
-            containing_service=None,
-            input_type=_DELETEDATASETREQUEST,
-            output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
-            serialized_options=b"\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="ImportData",
-            full_name="google.cloud.automl.v1beta1.AutoMl.ImportData",
-            index=5,
-            containing_service=None,
-            input_type=_IMPORTDATAREQUEST,
-            output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
-            serialized_options=b'\202\323\344\223\002A"</v1beta1/{name=projects/*/locations/*/datasets/*}:importData:\001*\332A\021name,input_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata',
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="ExportData",
-            full_name="google.cloud.automl.v1beta1.AutoMl.ExportData",
-            index=6,
-            containing_service=None,
-            input_type=_EXPORTDATAREQUEST,
-            output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
-            serialized_options=b'\202\323\344\223\002A"</v1beta1/{name=projects/*/locations/*/datasets/*}:exportData:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata',
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="GetAnnotationSpec",
-            full_name="google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec",
-            index=7,
-            containing_service=None,
-            input_type=_GETANNOTATIONSPECREQUEST,
-            output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2._ANNOTATIONSPEC,
-            serialized_options=b"\202\323\344\223\002E\022C/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\332A\004name",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="GetTableSpec",
-            full_name="google.cloud.automl.v1beta1.AutoMl.GetTableSpec",
-            index=8,
-            containing_service=None,
-            input_type=_GETTABLESPECREQUEST,
-            output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC,
-            serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\332A\004name",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="ListTableSpecs",
-            full_name="google.cloud.automl.v1beta1.AutoMl.ListTableSpecs",
-            index=9,
-            containing_service=None,
-            input_type=_LISTTABLESPECSREQUEST,
-            output_type=_LISTTABLESPECSRESPONSE,
-            serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\332A\006parent",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="UpdateTableSpec",
-            full_name="google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec",
-            index=10,
-            containing_service=None,
-            input_type=_UPDATETABLESPECREQUEST,
-            output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC,
-            serialized_options=b"\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\332A\ntable_spec",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="GetColumnSpec",
-            full_name="google.cloud.automl.v1beta1.AutoMl.GetColumnSpec",
-            index=11,
-            containing_service=None,
-            input_type=_GETCOLUMNSPECREQUEST,
-            output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC,
-            serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\332A\004name",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="ListColumnSpecs",
-            full_name="google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs",
-
index=12, - containing_service=None, - input_type=_LISTCOLUMNSPECSREQUEST, - output_type=_LISTCOLUMNSPECSRESPONSE, - serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateColumnSpec", - full_name="google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec", - index=13, - containing_service=None, - input_type=_UPDATECOLUMNSPECREQUEST, - output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, - serialized_options=b"\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec\332A\013column_spec", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateModel", - full_name="google.cloud.automl.v1beta1.AutoMl.CreateModel", - index=14, - containing_service=None, - input_type=_CREATEMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetModel", - full_name="google.cloud.automl.v1beta1.AutoMl.GetModel", - index=15, - containing_service=None, - input_type=_GETMODELREQUEST, - output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL, - serialized_options=b"\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListModels", - full_name="google.cloud.automl.v1beta1.AutoMl.ListModels", - index=16, - containing_service=None, - input_type=_LISTMODELSREQUEST, - output_type=_LISTMODELSRESPONSE, - serialized_options=b"\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteModel", - full_name="google.cloud.automl.v1beta1.AutoMl.DeleteModel", - index=17, - containing_service=None, - input_type=_DELETEMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeployModel", - full_name="google.cloud.automl.v1beta1.AutoMl.DeployModel", - index=18, - containing_service=None, - input_type=_DEPLOYMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UndeployModel", - full_name="google.cloud.automl.v1beta1.AutoMl.UndeployModel", - index=19, - containing_service=None, - input_type=_UNDEPLOYMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExportModel", - full_name="google.cloud.automl.v1beta1.AutoMl.ExportModel", - index=20, - containing_service=None, - input_type=_EXPORTMODELREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExportEvaluatedExamples", - full_name="google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples", - index=21, - containing_service=None, - input_type=_EXPORTEVALUATEDEXAMPLESREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetModelEvaluation", - full_name="google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation", - index=22, - containing_service=None, - input_type=_GETMODELEVALUATIONREQUEST, - output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, - serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListModelEvaluations", - full_name="google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations", - index=23, - containing_service=None, - input_type=_LISTMODELEVALUATIONSREQUEST, - output_type=_LISTMODELEVALUATIONSRESPONSE, - serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_AUTOML) - -DESCRIPTOR.services_by_name["AutoMl"] = _AUTOML - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py deleted file mode 100644 index efb69009..00000000 --- a/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py +++ /dev/null @@ -1,546 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc - -from google.cloud.automl_v1beta1.proto import ( - annotation_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - column_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - dataset_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - model_evaluation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - model_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - service_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - table_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -class AutoMlStub(object): - """AutoML Server API. - - The resource names are assigned by the server. - The server never reuses names that it has created after the resources with - those names are deleted. - - An ID of a resource is the last element of the item's resource name. For - `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then - the id for the item is `{dataset_id}`. - - Currently the only supported `location_id` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateDataset = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/CreateDataset", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.CreateDatasetRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString, - ) - self.GetDataset = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/GetDataset", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetDatasetRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString, - ) - self.ListDatasets = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ListDatasets", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListDatasetsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListDatasetsResponse.FromString, - ) - self.UpdateDataset = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/UpdateDataset", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UpdateDatasetRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString, - ) - self.DeleteDataset = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/DeleteDataset", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.DeleteDatasetRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ImportData = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ImportData", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ImportDataRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ExportData = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ExportData", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ExportDataRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetAnnotationSpec = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.FromString, - ) - self.GetTableSpec = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/GetTableSpec", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetTableSpecRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.TableSpec.FromString, - ) - self.ListTableSpecs = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListTableSpecsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListTableSpecsResponse.FromString, - ) - self.UpdateTableSpec = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec", - 
request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UpdateTableSpecRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.TableSpec.FromString, - ) - self.GetColumnSpec = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetColumnSpecRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.ColumnSpec.FromString, - ) - self.ListColumnSpecs = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListColumnSpecsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListColumnSpecsResponse.FromString, - ) - self.UpdateColumnSpec = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UpdateColumnSpecRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.ColumnSpec.FromString, - ) - self.CreateModel = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/CreateModel", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.CreateModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetModel = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/GetModel", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetModelRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.Model.FromString, - ) - self.ListModels = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ListModels", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelsResponse.FromString, - ) - self.DeleteModel = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/DeleteModel", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.DeleteModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeployModel = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/DeployModel", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.DeployModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UndeployModel = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/UndeployModel", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UndeployModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ExportModel = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ExportModel", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ExportModelRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ExportEvaluatedExamples = 
channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ExportEvaluatedExamplesRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetModelEvaluation = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.ModelEvaluation.FromString, - ) - self.ListModelEvaluations = channel.unary_unary( - "/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations", - request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelEvaluationsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelEvaluationsResponse.FromString, - ) - - -class AutoMlServicer(object): - """AutoML Server API. - - The resource names are assigned by the server. - The server never reuses names that it has created after the resources with - those names are deleted. - - An ID of a resource is the last element of the item's resource name. For - `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then - the id for the item is `{dataset_id}`. - - Currently the only supported `location_id` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - def CreateDataset(self, request, context): - """Creates a dataset. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetDataset(self, request, context): - """Gets a dataset. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListDatasets(self, request, context): - """Lists datasets in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateDataset(self, request, context): - """Updates a dataset. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteDataset(self, request, context): - """Deletes a dataset and all of its contents. - Returns empty response in the - [response][google.longrunning.Operation.response] field when it completes, - and `delete_details` in the - [metadata][google.longrunning.Operation.metadata] field. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ImportData(self, request, context): - """Imports data into a dataset. - For Tables this method can only be called on an empty Dataset. - - For Tables: - * A - [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] - parameter must be explicitly set. - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. 
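A sketch of the Tables import call this docstring describes, assuming the `InputConfig` and `GcsSource` messages from `io_pb2` and the hypothetical `stub` from the earlier sketches; the dataset name and URI are placeholders:

```py
request = service_pb2.ImportDataRequest(
    name="projects/my-project/locations/us-central1/datasets/TBL123",
    input_config=io_pb2.InputConfig(
        gcs_source=io_pb2.GcsSource(input_uris=["gs://my-bucket/rows.csv"]),
        # For Tables, schema_inference_version must be set explicitly.
        params={"schema_inference_version": "1"},
    ),
)
operation = stub.ImportData(request)  # a long-running Operation
```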
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExportData(self, request, context): - """Exports dataset's data to the provided output location. - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it completes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAnnotationSpec(self, request, context): - """Gets an annotation spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetTableSpec(self, request, context): - """Gets a table spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListTableSpecs(self, request, context): - """Lists table specs in a dataset. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateTableSpec(self, request, context): - """Updates a table spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetColumnSpec(self, request, context): - """Gets a column spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListColumnSpecs(self, request, context): - """Lists column specs in a table spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateColumnSpec(self, request, context): - """Updates a column spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateModel(self, request, context): - """Creates a model. - Returns a Model in the [response][google.longrunning.Operation.response] - field when it completes. - When you create a model, several model evaluations are created for it: - a global evaluation, and one evaluation for each annotation spec. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetModel(self, request, context): - """Gets a model. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListModels(self, request, context): - """Lists models. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteModel(self, request, context): - """Deletes a model. - Returns `google.protobuf.Empty` in the - [response][google.longrunning.Operation.response] field when it completes, - and `delete_details` in the - [metadata][google.longrunning.Operation.metadata] field. 
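Every mutating RPC in this service returns a `google.longrunning.Operation`, so callers working at this level poll the Operations service themselves. A rough sketch, assuming the `operations_pb2_grpc` module shipped by `googleapis-common-protos[grpc]` and the `channel`/`stub` from the earlier sketches:

```py
import time

from google.longrunning import operations_pb2, operations_pb2_grpc

operation = stub.DeleteModel(
    service_pb2.DeleteModelRequest(
        name="projects/my-project/locations/us-central1/models/MODEL_ID"
    )
)
ops_stub = operations_pb2_grpc.OperationsStub(channel)
while not operation.done:
    time.sleep(10)  # naive fixed-interval poll; real code should back off
    operation = ops_stub.GetOperation(
        operations_pb2.GetOperationRequest(name=operation.name)
    )
```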
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def DeployModel(self, request, context):
-        """Deploys a model. If a model is already deployed, deploying it with the
-        same parameters has no effect. Deploying with different parameters
-        (e.g. changing
-
-        [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
-        will reset the deployment state without pausing the model's availability.
-
-        Only applicable for Text Classification, Image Object Detection, Tables, and Image Segmentation; all other domains manage
-        deployment automatically.
-
-        Returns an empty response in the
-        [response][google.longrunning.Operation.response] field when it completes.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def UndeployModel(self, request, context):
-        """Undeploys a model. If the model is not deployed, this method has no effect.
-
-        Only applicable for Text Classification, Image Object Detection and Tables;
-        all other domains manage deployment automatically.
-
-        Returns an empty response in the
-        [response][google.longrunning.Operation.response] field when it completes.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def ExportModel(self, request, context):
-        """Exports a trained, "export-able" model to a user-specified Google Cloud
-        Storage location. A model is considered export-able if and only if it has
-        an export format defined for it in
-
-        [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
-
-        Returns an empty response in the
-        [response][google.longrunning.Operation.response] field when it completes.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def ExportEvaluatedExamples(self, request, context):
-        """Exports examples on which the model was evaluated (i.e. which were in the
-        TEST set of the dataset the model was created from), together with their
-        ground truth annotations and the annotations created (predicted) by the
-        model.
-        The examples, ground truth and predictions are exported in the state
-        they were at the moment the model was evaluated.
-
-        This export is available only for 30 days after the model evaluation is
-        created.
-
-        Currently only available for Tables.
-
-        Returns an empty response in the
-        [response][google.longrunning.Operation.response] field when it completes.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def GetModelEvaluation(self, request, context):
-        """Gets a model evaluation.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def ListModelEvaluations(self, request, context):
-        """Lists model evaluations.
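Tying this to the `filter` semantics documented on `ListModelEvaluationsRequest` above, fetching only the aggregate (model-level) evaluation could look like this sketch, with the same hypothetical `stub` and placeholder names:

```py
request = service_pb2.ListModelEvaluationsRequest(
    parent="projects/my-project/locations/us-central1/models/MODEL_ID",
    # Per the request docs, this selects the evaluation aggregated
    # over all annotation specs.
    filter="NOT annotation_spec_id:*",
)
for evaluation in stub.ListModelEvaluations(request).model_evaluation:
    print(evaluation.name)
```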
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_AutoMlServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateDataset": grpc.unary_unary_rpc_method_handler( - servicer.CreateDataset, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.CreateDatasetRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString, - ), - "GetDataset": grpc.unary_unary_rpc_method_handler( - servicer.GetDataset, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetDatasetRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString, - ), - "ListDatasets": grpc.unary_unary_rpc_method_handler( - servicer.ListDatasets, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListDatasetsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListDatasetsResponse.SerializeToString, - ), - "UpdateDataset": grpc.unary_unary_rpc_method_handler( - servicer.UpdateDataset, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UpdateDatasetRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString, - ), - "DeleteDataset": grpc.unary_unary_rpc_method_handler( - servicer.DeleteDataset, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.DeleteDatasetRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ImportData": grpc.unary_unary_rpc_method_handler( - servicer.ImportData, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ImportDataRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ExportData": grpc.unary_unary_rpc_method_handler( - servicer.ExportData, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ExportDataRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetAnnotationSpec": grpc.unary_unary_rpc_method_handler( - servicer.GetAnnotationSpec, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.SerializeToString, - ), - "GetTableSpec": grpc.unary_unary_rpc_method_handler( - servicer.GetTableSpec, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetTableSpecRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.TableSpec.SerializeToString, - ), - "ListTableSpecs": grpc.unary_unary_rpc_method_handler( - servicer.ListTableSpecs, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListTableSpecsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListTableSpecsResponse.SerializeToString, - ), - "UpdateTableSpec": grpc.unary_unary_rpc_method_handler( - servicer.UpdateTableSpec, - 
request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UpdateTableSpecRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.TableSpec.SerializeToString, - ), - "GetColumnSpec": grpc.unary_unary_rpc_method_handler( - servicer.GetColumnSpec, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetColumnSpecRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.ColumnSpec.SerializeToString, - ), - "ListColumnSpecs": grpc.unary_unary_rpc_method_handler( - servicer.ListColumnSpecs, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListColumnSpecsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListColumnSpecsResponse.SerializeToString, - ), - "UpdateColumnSpec": grpc.unary_unary_rpc_method_handler( - servicer.UpdateColumnSpec, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UpdateColumnSpecRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.ColumnSpec.SerializeToString, - ), - "CreateModel": grpc.unary_unary_rpc_method_handler( - servicer.CreateModel, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.CreateModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetModel": grpc.unary_unary_rpc_method_handler( - servicer.GetModel, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetModelRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.Model.SerializeToString, - ), - "ListModels": grpc.unary_unary_rpc_method_handler( - servicer.ListModels, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelsResponse.SerializeToString, - ), - "DeleteModel": grpc.unary_unary_rpc_method_handler( - servicer.DeleteModel, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.DeleteModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeployModel": grpc.unary_unary_rpc_method_handler( - servicer.DeployModel, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.DeployModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UndeployModel": grpc.unary_unary_rpc_method_handler( - servicer.UndeployModel, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.UndeployModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ExportModel": grpc.unary_unary_rpc_method_handler( - servicer.ExportModel, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ExportModelRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ExportEvaluatedExamples": grpc.unary_unary_rpc_method_handler( - servicer.ExportEvaluatedExamples, - 
request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ExportEvaluatedExamplesRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetModelEvaluation": grpc.unary_unary_rpc_method_handler( - servicer.GetModelEvaluation, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.ModelEvaluation.SerializeToString, - ), - "ListModelEvaluations": grpc.unary_unary_rpc_method_handler( - servicer.ListModelEvaluations, - request_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelEvaluationsRequest.FromString, - response_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_service__pb2.ListModelEvaluationsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.automl.v1beta1.AutoMl", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/google/cloud/automl_v1beta1/proto/table_spec.proto b/google/cloud/automl_v1beta1/proto/table_spec.proto deleted file mode 100644 index bc3fc744..00000000 --- a/google/cloud/automl_v1beta1/proto/table_spec.proto +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/resource.proto"; -import "google/cloud/automl/v1beta1/io.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// A specification of a relational table. -// The table's schema is represented via its child column specs. It is -// pre-populated as part of ImportData by schema inference algorithm, the -// version of which is a required parameter of ImportData InputConfig. -// Note: While working with a table, at times the schema may be -// inconsistent with the data in the table (e.g. string in a FLOAT64 column). -// The consistency validation is done upon creation of a model. -// Used by: -// * Tables -message TableSpec { - option (google.api.resource) = { - type: "automl.googleapis.com/TableSpec" - pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}" - }; - - // Output only. The resource name of the table spec. - // Form: - // - // `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}` - string name = 1; - - // column_spec_id of the time column. Only used if the parent dataset's - // ml_use_column_spec_id is not set. 
Used to split rows into TRAIN, VALIDATE - // and TEST sets such that oldest rows go to TRAIN set, newest to TEST, and - // those in between to VALIDATE. - // Required type: TIMESTAMP. - // If both this column and ml_use_column are not set, then ML use of all rows - // will be assigned by AutoML. NOTE: Updates of this field will instantly - // affect any other users concurrently working with the dataset. - string time_column_spec_id = 2; - - // Output only. The number of rows (i.e. examples) in the table. - int64 row_count = 3; - - // Output only. The number of valid rows (i.e. without values that don't match - // DataType-s of their columns). - int64 valid_row_count = 4; - - // Output only. The number of columns of the table. That is, the number of - // child ColumnSpec-s. - int64 column_count = 7; - - // Output only. Input configs via which data currently residing in the table - // had been imported. - repeated InputConfig input_configs = 5; - - // Used to perform consistent read-modify-write updates. If not set, a blind - // "overwrite" update happens. - string etag = 6; -} diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py deleted file mode 100644 index a6934cea..00000000 --- a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py +++ /dev/null @@ -1,248 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/table_spec.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.automl_v1beta1.proto import ( - io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/table_spec.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/table_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1cgoogle/api/annotations.proto"\xc1\x02\n\tTableSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x13time_column_spec_id\x18\x02 \x01(\t\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x17\n\x0fvalid_row_count\x18\x04 \x01(\x03\x12\x14\n\x0c\x63olumn_count\x18\x07 \x01(\x03\x12?\n\rinput_configs\x18\x05 \x03(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t:x\xea\x41u\n\x1f\x61utoml.googleapis.com/TableSpec\x12Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TABLESPEC = _descriptor.Descriptor( - name="TableSpec", - full_name="google.cloud.automl.v1beta1.TableSpec", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.automl.v1beta1.TableSpec.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_column_spec_id", - full_name="google.cloud.automl.v1beta1.TableSpec.time_column_spec_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_count", - full_name="google.cloud.automl.v1beta1.TableSpec.row_count", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="valid_row_count", - full_name="google.cloud.automl.v1beta1.TableSpec.valid_row_count", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_count", - full_name="google.cloud.automl.v1beta1.TableSpec.column_count", - index=4, - number=7, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="input_configs", - full_name="google.cloud.automl.v1beta1.TableSpec.input_configs", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="etag", - full_name="google.cloud.automl.v1beta1.TableSpec.etag", - index=6, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=b"\352Au\n\037automl.googleapis.com/TableSpec\022Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=185, - serialized_end=506, -) - -_TABLESPEC.fields_by_name[ - "input_configs" -].message_type = google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2._INPUTCONFIG -DESCRIPTOR.message_types_by_name["TableSpec"] = _TABLESPEC -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TableSpec = _reflection.GeneratedProtocolMessageType( - "TableSpec", - (_message.Message,), - { - "DESCRIPTOR": _TABLESPEC, - "__module__": "google.cloud.automl_v1beta1.proto.table_spec_pb2", - "__doc__": """A specification of a relational table. The table’s schema is - represented via its child column specs. It is pre-populated as part of - ImportData by schema inference algorithm, the version of which is a - required parameter of ImportData InputConfig. Note: While working with - a table, at times the schema may be inconsistent with the data in the - table (e.g. string in a FLOAT64 column). The consistency validation is - done upon creation of a model. Used by: \* Tables - - Attributes: - name: - Output only. The resource name of the table spec. Form: ``pro - jects/{project_id}/locations/{location_id}/datasets/{dataset_i - d}/tableSpecs/{table_spec_id}`` - time_column_spec_id: - column_spec_id of the time column. Only used if the parent - dataset’s ml_use_column_spec_id is not set. Used to split rows - into TRAIN, VALIDATE and TEST sets such that oldest rows go to - TRAIN set, newest to TEST, and those in between to VALIDATE. - Required type: TIMESTAMP. If both this column and - ml_use_column are not set, then ML use of all rows will be - assigned by AutoML. NOTE: Updates of this field will instantly - affect any other users concurrently working with the dataset. - row_count: - Output only. The number of rows (i.e. examples) in the table. - valid_row_count: - Output only. The number of valid rows (i.e. without values - that don’t match DataType-s of their columns). - column_count: - Output only. The number of columns of the table. That is, the - number of child ColumnSpec-s. - input_configs: - Output only. Input configs via which data currently residing - in the table had been imported. - etag: - Used to perform consistent read-modify-write updates. If not - set, a blind “overwrite” update happens. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TableSpec) - }, -) -_sym_db.RegisterMessage(TableSpec) - - -DESCRIPTOR._options = None -_TABLESPEC._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/table_spec_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/tables.proto b/google/cloud/automl_v1beta1/proto/tables.proto deleted file mode 100644 index 5327f5e7..00000000 --- a/google/cloud/automl_v1beta1/proto/tables.proto +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/cloud/automl/v1beta1/column_spec.proto"; -import "google/cloud/automl/v1beta1/data_items.proto"; -import "google/cloud/automl/v1beta1/data_stats.proto"; -import "google/cloud/automl/v1beta1/ranges.proto"; -import "google/cloud/automl/v1beta1/regression.proto"; -import "google/cloud/automl/v1beta1/temporal.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Metadata for a dataset used for AutoML Tables. -message TablesDatasetMetadata { - // Output only. The table_spec_id of the primary table of this dataset. - string primary_table_spec_id = 1; - - // column_spec_id of the primary table's column that should be used as the - // training & prediction target. - // This column must be non-nullable and have one of following data types - // (otherwise model creation will error): - // - // * CATEGORY - // - // * FLOAT64 - // - // If the type is CATEGORY , only up to - // 100 unique values may exist in that column across all rows. - // - // NOTE: Updates of this field will instantly affect any other users - // concurrently working with the dataset. - string target_column_spec_id = 2; - - // column_spec_id of the primary table's column that should be used as the - // weight column, i.e. the higher the value the more important the row will be - // during model training. - // Required type: FLOAT64. - // Allowed values: 0 to 10000, inclusive on both ends; 0 means the row is - // ignored for training. - // If not set all rows are assumed to have equal weight of 1. - // NOTE: Updates of this field will instantly affect any other users - // concurrently working with the dataset. - string weight_column_spec_id = 3; - - // column_spec_id of the primary table column which specifies a possible ML - // use of the row, i.e. the column will be used to split the rows into TRAIN, - // VALIDATE and TEST sets. - // Required type: STRING. - // This column, if set, must either have all of `TRAIN`, `VALIDATE`, `TEST` - // among its values, or only have `TEST`, `UNASSIGNED` values. In the latter - // case the rows with `UNASSIGNED` value will be assigned by AutoML. Note - // that if a given ml use distribution makes it impossible to create a "good" - // model, that call will error describing the issue. - // If both this column_spec_id and primary table's time_column_spec_id are not - // set, then all rows are treated as `UNASSIGNED`. - // NOTE: Updates of this field will instantly affect any other users - // concurrently working with the dataset. - string ml_use_column_spec_id = 4; - - // Output only. 
Correlations between - // - // [TablesDatasetMetadata.target_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id], - // and other columns of the - // - // [TablesDatasetMetadataprimary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]. - // Only set if the target column is set. Mapping from other column spec id to - // its CorrelationStats with the target column. - // This field may be stale, see the stats_update_time field for - // for the timestamp at which these stats were last updated. - map target_column_correlations = 6; - - // Output only. The most recent timestamp when target_column_correlations - // field and all descendant ColumnSpec.data_stats and - // ColumnSpec.top_correlated_columns fields were last (re-)generated. Any - // changes that happened to the dataset afterwards are not reflected in these - // fields values. The regeneration happens in the background on a best effort - // basis. - google.protobuf.Timestamp stats_update_time = 7; -} - -// Model metadata specific to AutoML Tables. -message TablesModelMetadata { - // Additional optimization objective configuration. Required for - // `MAXIMIZE_PRECISION_AT_RECALL` and `MAXIMIZE_RECALL_AT_PRECISION`, - // otherwise unused. - oneof additional_optimization_objective_config { - // Required when optimization_objective is "MAXIMIZE_PRECISION_AT_RECALL". - // Must be between 0 and 1, inclusive. - float optimization_objective_recall_value = 17; - - // Required when optimization_objective is "MAXIMIZE_RECALL_AT_PRECISION". - // Must be between 0 and 1, inclusive. - float optimization_objective_precision_value = 18; - } - - // Column spec of the dataset's primary table's column the model is - // predicting. Snapshotted when model creation started. - // Only 3 fields are used: - // name - May be set on CreateModel, if it's not then the ColumnSpec - // corresponding to the current target_column_spec_id of the dataset - // the model is trained from is used. - // If neither is set, CreateModel will error. - // display_name - Output only. - // data_type - Output only. - ColumnSpec target_column_spec = 2; - - // Column specs of the dataset's primary table's columns, on which - // the model is trained and which are used as the input for predictions. - // The - // - // [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - // as well as, according to dataset's state upon model creation, - // - // [weight_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spec_id], - // and - // - // [ml_use_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.ml_use_column_spec_id] - // must never be included here. - // - // Only 3 fields are used: - // - // * name - May be set on CreateModel, if set only the columns specified are - // used, otherwise all primary table's columns (except the ones listed - // above) are used for the training and prediction input. - // - // * display_name - Output only. - // - // * data_type - Output only. - repeated ColumnSpec input_feature_column_specs = 3; - - // Objective function the model is optimizing towards. The training process - // creates a model that maximizes/minimizes the value of the objective - // function over the validation set. - // - // The supported optimization objectives depend on the prediction type. - // If the field is not set, a default objective function is used. 
- // - // CLASSIFICATION_BINARY: - // "MAXIMIZE_AU_ROC" (default) - Maximize the area under the receiver - // operating characteristic (ROC) curve. - // "MINIMIZE_LOG_LOSS" - Minimize log loss. - // "MAXIMIZE_AU_PRC" - Maximize the area under the precision-recall curve. - // "MAXIMIZE_PRECISION_AT_RECALL" - Maximize precision for a specified - // recall value. - // "MAXIMIZE_RECALL_AT_PRECISION" - Maximize recall for a specified - // precision value. - // - // CLASSIFICATION_MULTI_CLASS : - // "MINIMIZE_LOG_LOSS" (default) - Minimize log loss. - // - // - // REGRESSION: - // "MINIMIZE_RMSE" (default) - Minimize root-mean-squared error (RMSE). - // "MINIMIZE_MAE" - Minimize mean-absolute error (MAE). - // "MINIMIZE_RMSLE" - Minimize root-mean-squared log error (RMSLE). - string optimization_objective = 4; - - // Output only. Auxiliary information for each of the - // input_feature_column_specs with respect to this particular model. - repeated TablesModelColumnInfo tables_model_column_info = 5; - - // Required. The train budget of creating this model, expressed in milli node - // hours i.e. 1,000 value in this field means 1 node hour. - // - // The training cost of the model will not exceed this budget. The final cost - // will be attempted to be close to the budget, though may end up being (even) - // noticeably smaller - at the backend's discretion. This especially may - // happen when further model training ceases to provide any improvements. - // - // If the budget is set to a value known to be insufficient to train a - // model for the given dataset, the training won't be attempted and - // will error. - // - // The train budget must be between 1,000 and 72,000 milli node hours, - // inclusive. - int64 train_budget_milli_node_hours = 6; - - // Output only. The actual training cost of the model, expressed in milli - // node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed - // to not exceed the train budget. - int64 train_cost_milli_node_hours = 7; - - // Use the entire training budget. This disables the early stopping feature. - // By default, the early stopping feature is enabled, which means that AutoML - // Tables might stop training before the entire training budget has been used. - bool disable_early_stopping = 12; -} - -// Contains annotation details specific to Tables. -message TablesAnnotation { - // Output only. A confidence estimate between 0.0 and 1.0, inclusive. A higher - // value means greater confidence in the returned value. - // For - // - // [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - // of FLOAT64 data type the score is not populated. - float score = 1; - - // Output only. Only populated when - // - // [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - // has FLOAT64 data type. An interval in which the exactly correct target - // value has 95% chance to be in. - DoubleRange prediction_interval = 4; - - // The predicted value of the row's - // - // [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]. - // The value depends on the column's DataType: - // - // * CATEGORY - the predicted (with the above confidence `score`) CATEGORY - // value. - // - // * FLOAT64 - the predicted (with above `prediction_interval`) FLOAT64 value. - google.protobuf.Value value = 2; - - // Output only. 
Auxiliary information for each of the model's - // - // [input_feature_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - // with respect to this particular prediction. - // If no other fields than - // - // [column_spec_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_spec_name] - // and - // - // [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name] - // would be populated, then this whole field is not. - repeated TablesModelColumnInfo tables_model_column_info = 3; - - // Output only. Stores the prediction score for the baseline example, which - // is defined as the example with all values set to their baseline values. - // This is used as part of the Sampled Shapley explanation of the model's - // prediction. This field is populated only when feature importance is - // requested. For regression models, this holds the baseline prediction for - // the baseline example. For classification models, this holds the baseline - // prediction for the baseline example for the argmax class. - float baseline_score = 5; -} - -// An information specific to given column and Tables Model, in context -// of the Model and the predictions created by it. -message TablesModelColumnInfo { - // Output only. The name of the ColumnSpec describing the column. Not - // populated when this proto is outputted to BigQuery. - string column_spec_name = 1; - - // Output only. The display name of the column (same as the display_name of - // its ColumnSpec). - string column_display_name = 2; - - // Output only. When given as part of a Model (always populated): - // Measurement of how much model predictions correctness on the TEST data - // depend on values in this column. A value between 0 and 1, higher means - // higher influence. These values are normalized - for all input feature - // columns of a given model they add to 1. - // - // When given back by Predict (populated iff - // [feature_importance - // param][google.cloud.automl.v1beta1.PredictRequest.params] is set) or Batch - // Predict (populated iff - // [feature_importance][google.cloud.automl.v1beta1.PredictRequest.params] - // param is set): - // Measurement of how impactful for the prediction returned for the given row - // the value in this column was. Specifically, the feature importance - // specifies the marginal contribution that the feature made to the prediction - // score compared to the baseline score. These values are computed using the - // Sampled Shapley method. - float feature_importance = 3; -} diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py deleted file mode 100644 index 7335fe3d..00000000 --- a/google/cloud/automl_v1beta1/proto/tables_pb2.py +++ /dev/null @@ -1,984 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
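
The `tables.proto` messages removed above (`TablesDatasetMetadata`, `TablesModelMetadata`, `TablesAnnotation`, `TablesModelColumnInfo`) survive in 2.x as proto-plus types rather than hand-vendored `_pb2` modules. Below is a minimal sketch — assuming the generated `google.cloud.automl_v1beta1` package re-exports these types, and using placeholder project/dataset IDs — of building model metadata that uses the `additional_optimization_objective_config` oneof documented in the comments above:

```py
from google.cloud import automl_v1beta1

project_id = "YOUR_PROJECT_ID"
client = automl_v1beta1.AutoMlClient()
parent = f"projects/{project_id}/locations/us-central1"

# Per the proto comments: MAXIMIZE_PRECISION_AT_RECALL requires the matching
# oneof field, optimization_objective_recall_value (between 0 and 1 inclusive).
model_metadata = automl_v1beta1.TablesModelMetadata(
    optimization_objective="MAXIMIZE_PRECISION_AT_RECALL",
    optimization_objective_recall_value=0.9,
    # Budget is in milli node hours: 1,000 here means one node hour, and the
    # allowed range is 1,000 to 72,000 inclusive.
    train_budget_milli_node_hours=1000,
)

model = automl_v1beta1.Model(
    display_name="my_tables_model",  # hypothetical display name
    dataset_id="YOUR_DATASET_ID",
    tables_model_metadata=model_metadata,
)

# create_model returns a long-running operation; .result() blocks until the
# model has been trained (which consumes the budget above).
response = client.create_model(parent=parent, model=model)
created_model = response.result()
```
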
-# source: google/cloud/automl_v1beta1/proto/tables.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - column_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - data_items_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - data_stats_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - ranges_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - regression_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2, -) -from google.cloud.automl_v1beta1.proto import ( - temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, -) -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/tables.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 
\x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xfd\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12\x16\n\x0e\x62\x61seline_score\x18\x05 \x01(\x02"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY = _descriptor.Descriptor( - name="TargetColumnCorrelationsEntry", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=858, - 
serialized_end=968, -) - -_TABLESDATASETMETADATA = _descriptor.Descriptor( - name="TablesDatasetMetadata", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="primary_table_spec_id", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="target_column_spec_id", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="weight_column_spec_id", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spec_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ml_use_column_spec_id", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.ml_use_column_spec_id", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="target_column_correlations", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_correlations", - index=4, - number=6, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="stats_update_time", - full_name="google.cloud.automl.v1beta1.TablesDatasetMetadata.stats_update_time", - index=5, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=536, - serialized_end=968, -) - - -_TABLESMODELMETADATA = _descriptor.Descriptor( - name="TablesModelMetadata", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata", - 
filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="optimization_objective_recall_value", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.optimization_objective_recall_value", - index=0, - number=17, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optimization_objective_precision_value", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.optimization_objective_precision_value", - index=1, - number=18, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="target_column_spec", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="input_feature_column_specs", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optimization_objective", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.optimization_objective", - index=4, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tables_model_column_info", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.tables_model_column_info", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="train_budget_milli_node_hours", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.train_budget_milli_node_hours", - index=6, - number=6, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="train_cost_milli_node_hours", - 
full_name="google.cloud.automl.v1beta1.TablesModelMetadata.train_cost_milli_node_hours", - index=7, - number=7, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="disable_early_stopping", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.disable_early_stopping", - index=8, - number=12, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="additional_optimization_objective_config", - full_name="google.cloud.automl.v1beta1.TablesModelMetadata.additional_optimization_objective_config", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=971, - serialized_end=1505, -) - - -_TABLESANNOTATION = _descriptor.Descriptor( - name="TablesAnnotation", - full_name="google.cloud.automl.v1beta1.TablesAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="score", - full_name="google.cloud.automl.v1beta1.TablesAnnotation.score", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="prediction_interval", - full_name="google.cloud.automl.v1beta1.TablesAnnotation.prediction_interval", - index=1, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.automl.v1beta1.TablesAnnotation.value", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tables_model_column_info", - full_name="google.cloud.automl.v1beta1.TablesAnnotation.tables_model_column_info", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="baseline_score", - full_name="google.cloud.automl.v1beta1.TablesAnnotation.baseline_score", - index=4, - 
number=5, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1508, - serialized_end=1761, -) - - -_TABLESMODELCOLUMNINFO = _descriptor.Descriptor( - name="TablesModelColumnInfo", - full_name="google.cloud.automl.v1beta1.TablesModelColumnInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="column_spec_name", - full_name="google.cloud.automl.v1beta1.TablesModelColumnInfo.column_spec_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_display_name", - full_name="google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="feature_importance", - full_name="google.cloud.automl.v1beta1.TablesModelColumnInfo.feature_importance", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1763, - serialized_end=1869, -) - -_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2._CORRELATIONSTATS -) -_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY.containing_type = ( - _TABLESDATASETMETADATA -) -_TABLESDATASETMETADATA.fields_by_name[ - "target_column_correlations" -].message_type = _TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY -_TABLESDATASETMETADATA.fields_by_name[ - "stats_update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TABLESMODELMETADATA.fields_by_name[ - "target_column_spec" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC -) -_TABLESMODELMETADATA.fields_by_name[ - "input_feature_column_specs" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC -) -_TABLESMODELMETADATA.fields_by_name[ - "tables_model_column_info" -].message_type = _TABLESMODELCOLUMNINFO -_TABLESMODELMETADATA.oneofs_by_name[ - "additional_optimization_objective_config" -].fields.append( - 
_TABLESMODELMETADATA.fields_by_name["optimization_objective_recall_value"] -) -_TABLESMODELMETADATA.fields_by_name[ - "optimization_objective_recall_value" -].containing_oneof = _TABLESMODELMETADATA.oneofs_by_name[ - "additional_optimization_objective_config" -] -_TABLESMODELMETADATA.oneofs_by_name[ - "additional_optimization_objective_config" -].fields.append( - _TABLESMODELMETADATA.fields_by_name["optimization_objective_precision_value"] -) -_TABLESMODELMETADATA.fields_by_name[ - "optimization_objective_precision_value" -].containing_oneof = _TABLESMODELMETADATA.oneofs_by_name[ - "additional_optimization_objective_config" -] -_TABLESANNOTATION.fields_by_name[ - "prediction_interval" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2._DOUBLERANGE -) -_TABLESANNOTATION.fields_by_name[ - "value" -].message_type = google_dot_protobuf_dot_struct__pb2._VALUE -_TABLESANNOTATION.fields_by_name[ - "tables_model_column_info" -].message_type = _TABLESMODELCOLUMNINFO -DESCRIPTOR.message_types_by_name["TablesDatasetMetadata"] = _TABLESDATASETMETADATA -DESCRIPTOR.message_types_by_name["TablesModelMetadata"] = _TABLESMODELMETADATA -DESCRIPTOR.message_types_by_name["TablesAnnotation"] = _TABLESANNOTATION -DESCRIPTOR.message_types_by_name["TablesModelColumnInfo"] = _TABLESMODELCOLUMNINFO -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TablesDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "TablesDatasetMetadata", - (_message.Message,), - { - "TargetColumnCorrelationsEntry": _reflection.GeneratedProtocolMessageType( - "TargetColumnCorrelationsEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry) - }, - ), - "DESCRIPTOR": _TABLESDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", - "__doc__": """Metadata for a dataset used for AutoML Tables. - - Attributes: - primary_table_spec_id: - Output only. The table_spec_id of the primary table of this - dataset. - target_column_spec_id: - column_spec_id of the primary table’s column that should be - used as the training & prediction target. This column must be - non-nullable and have one of following data types (otherwise - model creation will error): - CATEGORY - FLOAT64 If the - type is CATEGORY , only up to 100 unique values may exist in - that column across all rows. NOTE: Updates of this field will - instantly affect any other users concurrently working with the - dataset. - weight_column_spec_id: - column_spec_id of the primary table’s column that should be - used as the weight column, i.e. the higher the value the more - important the row will be during model training. Required - type: FLOAT64. Allowed values: 0 to 10000, inclusive on both - ends; 0 means the row is ignored for training. If not set all - rows are assumed to have equal weight of 1. NOTE: Updates of - this field will instantly affect any other users concurrently - working with the dataset. - ml_use_column_spec_id: - column_spec_id of the primary table column which specifies a - possible ML use of the row, i.e. the column will be used to - split the rows into TRAIN, VALIDATE and TEST sets. Required - type: STRING. This column, if set, must either have all of - ``TRAIN``, ``VALIDATE``, ``TEST`` among its values, or only - have ``TEST``, ``UNASSIGNED`` values. 
In the latter case the - rows with ``UNASSIGNED`` value will be assigned by AutoML. - Note that if a given ml use distribution makes it impossible - to create a “good” model, that call will error describing the - issue. If both this column_spec_id and primary table’s - time_column_spec_id are not set, then all rows are treated as - ``UNASSIGNED``. NOTE: Updates of this field will instantly - affect any other users concurrently working with the dataset. - target_column_correlations: - Output only. Correlations between [TablesDatasetMetadata.targ - et_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMe - tadata.target_column_spec_id], and other columns of the [Tabl - esDatasetMetadataprimary_table][google.cloud.automl.v1beta1.Ta - blesDatasetMetadata.primary_table_spec_id]. Only set if the - target column is set. Mapping from other column spec id to its - CorrelationStats with the target column. This field may be - stale, see the stats_update_time field for for the timestamp - at which these stats were last updated. - stats_update_time: - Output only. The most recent timestamp when - target_column_correlations field and all descendant - ColumnSpec.data_stats and ColumnSpec.top_correlated_columns - fields were last (re-)generated. Any changes that happened to - the dataset afterwards are not reflected in these fields - values. The regeneration happens in the background on a best - effort basis. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesDatasetMetadata) - }, -) -_sym_db.RegisterMessage(TablesDatasetMetadata) -_sym_db.RegisterMessage(TablesDatasetMetadata.TargetColumnCorrelationsEntry) - -TablesModelMetadata = _reflection.GeneratedProtocolMessageType( - "TablesModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TABLESMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", - "__doc__": """Model metadata specific to AutoML Tables. - - Attributes: - additional_optimization_objective_config: - Additional optimization objective configuration. Required for - ``MAXIMIZE_PRECISION_AT_RECALL`` and - ``MAXIMIZE_RECALL_AT_PRECISION``, otherwise unused. - optimization_objective_recall_value: - Required when optimization_objective is - “MAXIMIZE_PRECISION_AT_RECALL”. Must be between 0 and 1, - inclusive. - optimization_objective_precision_value: - Required when optimization_objective is - “MAXIMIZE_RECALL_AT_PRECISION”. Must be between 0 and 1, - inclusive. - target_column_spec: - Column spec of the dataset’s primary table’s column the model - is predicting. Snapshotted when model creation started. Only 3 - fields are used: name - May be set on CreateModel, if it’s not - then the ColumnSpec corresponding to the current - target_column_spec_id of the dataset the model is trained from - is used. If neither is set, CreateModel will error. - display_name - Output only. data_type - Output only. - input_feature_column_specs: - Column specs of the dataset’s primary table’s columns, on - which the model is trained and which are used as the input for - predictions. The [target_column][google.cloud.automl.v1beta1. - TablesModelMetadata.target_column_spec] as well as, according - to dataset’s state upon model creation, [weight_column][googl - e.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spe - c_id], and [ml_use_column][google.cloud.automl.v1beta1.Tables - DatasetMetadata.ml_use_column_spec_id] must never be included - here. 
Only 3 fields are used: - name - May be set on - CreateModel, if set only the columns specified are used, - otherwise all primary table’s columns (except the ones - listed above) are used for the training and prediction input. - - display_name - Output only. - data_type - Output only. - optimization_objective: - Objective function the model is optimizing towards. The - training process creates a model that maximizes/minimizes the - value of the objective function over the validation set. The - supported optimization objectives depend on the prediction - type. If the field is not set, a default objective function is - used. CLASSIFICATION_BINARY: “MAXIMIZE_AU_ROC” (default) - - Maximize the area under the receiver operating characteristic - (ROC) curve. “MINIMIZE_LOG_LOSS” - Minimize log loss. - “MAXIMIZE_AU_PRC” - Maximize the area under the precision- - recall curve. “MAXIMIZE_PRECISION_AT_RECALL” - Maximize - precision for a specified recall value. - “MAXIMIZE_RECALL_AT_PRECISION” - Maximize recall for a - specified precision value. CLASSIFICATION_MULTI_CLASS : - “MINIMIZE_LOG_LOSS” (default) - Minimize log loss. - REGRESSION: “MINIMIZE_RMSE” (default) - Minimize root-mean- - squared error (RMSE). “MINIMIZE_MAE” - Minimize mean-absolute - error (MAE). “MINIMIZE_RMSLE” - Minimize root-mean-squared log - error (RMSLE). - tables_model_column_info: - Output only. Auxiliary information for each of the - input_feature_column_specs with respect to this particular - model. - train_budget_milli_node_hours: - Required. The train budget of creating this model, expressed - in milli node hours i.e. 1,000 value in this field means 1 - node hour. The training cost of the model will not exceed - this budget. The final cost will be attempted to be close to - the budget, though may end up being (even) noticeably smaller - - at the backend’s discretion. This especially may happen when - further model training ceases to provide any improvements. If - the budget is set to a value known to be insufficient to train - a model for the given dataset, the training won’t be attempted - and will error. The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - train_cost_milli_node_hours: - Output only. The actual training cost of the model, expressed - in milli node hours, i.e. 1,000 value in this field means 1 - node hour. Guaranteed to not exceed the train budget. - disable_early_stopping: - Use the entire training budget. This disables the early - stopping feature. By default, the early stopping feature is - enabled, which means that AutoML Tables might stop training - before the entire training budget has been used. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesModelMetadata) - }, -) -_sym_db.RegisterMessage(TablesModelMetadata) - -TablesAnnotation = _reflection.GeneratedProtocolMessageType( - "TablesAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _TABLESANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", - "__doc__": """Contains annotation details specific to Tables. - - Attributes: - score: - Output only. A confidence estimate between 0.0 and 1.0, - inclusive. A higher value means greater confidence in the - returned value. For [target_column_spec][google.cloud.automl. - v1beta1.TablesModelMetadata.target_column_spec] of FLOAT64 - data type the score is not populated. - prediction_interval: - Output only. Only populated when [target_column_spec][google. 
- cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - has FLOAT64 data type. An interval in which the exactly - correct target value has 95% chance to be in. - value: - The predicted value of the row’s [target_column][google.cloud - .automl.v1beta1.TablesModelMetadata.target_column_spec]. The - value depends on the column’s DataType: - CATEGORY - the - predicted (with the above confidence ``score``) CATEGORY - value. - FLOAT64 - the predicted (with above - ``prediction_interval``) FLOAT64 value. - tables_model_column_info: - Output only. Auxiliary information for each of the model’s [i - nput_feature_column_specs][google.cloud.automl.v1beta1.TablesM - odelMetadata.input_feature_column_specs] with respect to this - particular prediction. If no other fields than [column_spec_n - ame][google.cloud.automl.v1beta1.TablesModelColumnInfo.column\_ - spec_name] and [column_display_name][google.cloud.automl.v1be - ta1.TablesModelColumnInfo.column_display_name] would be - populated, then this whole field is not. - baseline_score: - Output only. Stores the prediction score for the baseline - example, which is defined as the example with all values set - to their baseline values. This is used as part of the Sampled - Shapley explanation of the model’s prediction. This field is - populated only when feature importance is requested. For - regression models, this holds the baseline prediction for the - baseline example. For classification models, this holds the - baseline prediction for the baseline example for the argmax - class. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesAnnotation) - }, -) -_sym_db.RegisterMessage(TablesAnnotation) - -TablesModelColumnInfo = _reflection.GeneratedProtocolMessageType( - "TablesModelColumnInfo", - (_message.Message,), - { - "DESCRIPTOR": _TABLESMODELCOLUMNINFO, - "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", - "__doc__": """An information specific to given column and Tables Model, in context - of the Model and the predictions created by it. - - Attributes: - column_spec_name: - Output only. The name of the ColumnSpec describing the column. - Not populated when this proto is outputted to BigQuery. - column_display_name: - Output only. The display name of the column (same as the - display_name of its ColumnSpec). - feature_importance: - Output only. When given as part of a Model (always populated): - Measurement of how much model predictions correctness on the - TEST data depend on values in this column. A value between 0 - and 1, higher means higher influence. These values are - normalized - for all input feature columns of a given model - they add to 1. When given back by Predict (populated iff - [feature_importance - param][google.cloud.automl.v1beta1.PredictRequest.params] is - set) or Batch Predict (populated iff [feature_importance][goog - le.cloud.automl.v1beta1.PredictRequest.params] param is set): - Measurement of how impactful for the prediction returned for - the given row the value in this column was. Specifically, the - feature importance specifies the marginal contribution that - the feature made to the prediction score compared to the - baseline score. These values are computed using the Sampled - Shapley method. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesModelColumnInfo) - }, -) -_sym_db.RegisterMessage(TablesModelColumnInfo) - - -DESCRIPTOR._options = None -_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/tables_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/tables_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/temporal.proto b/google/cloud/automl_v1beta1/proto/temporal.proto deleted file mode 100644 index 76db8887..00000000 --- a/google/cloud/automl_v1beta1/proto/temporal.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/protobuf/duration.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// A time period inside of an example that has a time dimension (e.g. video). -message TimeSegment { - // Start of the time segment (inclusive), represented as the duration since - // the example start. - google.protobuf.Duration start_time_offset = 1; - - // End of the time segment (exclusive), represented as the duration since the - // example start. - google.protobuf.Duration end_time_offset = 2; -} diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2.py b/google/cloud/automl_v1beta1/proto/temporal_pb2.py deleted file mode 100644 index 309c4644..00000000 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/automl_v1beta1/proto/temporal.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/temporal.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/temporal.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"w\n\x0bTimeSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TIMESEGMENT = _descriptor.Descriptor( - name="TimeSegment", - full_name="google.cloud.automl.v1beta1.TimeSegment", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_time_offset", - full_name="google.cloud.automl.v1beta1.TimeSegment.start_time_offset", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time_offset", - full_name="google.cloud.automl.v1beta1.TimeSegment.end_time_offset", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=143, - serialized_end=262, -) - -_TIMESEGMENT.fields_by_name[ - "start_time_offset" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_TIMESEGMENT.fields_by_name[ - "end_time_offset" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -DESCRIPTOR.message_types_by_name["TimeSegment"] = _TIMESEGMENT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TimeSegment = _reflection.GeneratedProtocolMessageType( - "TimeSegment", - (_message.Message,), - { - "DESCRIPTOR": _TIMESEGMENT, - "__module__": 
"google.cloud.automl_v1beta1.proto.temporal_pb2", - "__doc__": """A time period inside of an example that has a time dimension - (e.g. video). - - Attributes: - start_time_offset: - Start of the time segment (inclusive), represented as the - duration since the example start. - end_time_offset: - End of the time segment (exclusive), represented as the - duration since the example start. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimeSegment) - }, -) -_sym_db.RegisterMessage(TimeSegment) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/temporal_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/text.proto b/google/cloud/automl_v1beta1/proto/text.proto deleted file mode 100644 index f6f33185..00000000 --- a/google/cloud/automl_v1beta1/proto/text.proto +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_outer_classname = "TextProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Dataset metadata for classification. -message TextClassificationDatasetMetadata { - // Required. Type of the classification problem. - ClassificationType classification_type = 1; -} - -// Model metadata that is specific to text classification. -message TextClassificationModelMetadata { - // Output only. Classification type of the dataset used to train this model. - ClassificationType classification_type = 3; -} - -// Dataset metadata that is specific to text extraction -message TextExtractionDatasetMetadata { - -} - -// Model metadata that is specific to text extraction. -message TextExtractionModelMetadata { - -} - -// Dataset metadata for text sentiment. -message TextSentimentDatasetMetadata { - // Required. A sentiment is expressed as an integer ordinal, where higher value - // means a more positive sentiment. The range of sentiments that will be used - // is between 0 and sentiment_max (inclusive on both ends), and all the values - // in the range must be represented in the dataset before a model can be - // created. - // sentiment_max value must be between 1 and 10 (inclusive). - int32 sentiment_max = 1; -} - -// Model metadata that is specific to text sentiment. 
-message TextSentimentModelMetadata { - -} diff --git a/google/cloud/automl_v1beta1/proto/text_extraction.proto b/google/cloud/automl_v1beta1/proto/text_extraction.proto deleted file mode 100644 index cfb0e0b3..00000000 --- a/google/cloud/automl_v1beta1/proto/text_extraction.proto +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/text_segment.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Annotation for identifying spans of text. -message TextExtractionAnnotation { - // Required. Text extraction annotations can either be a text segment or a - // text relation. - oneof annotation { - // An entity annotation will set this, which is the part of the original - // text to which the annotation pertains. - TextSegment text_segment = 3; - } - - // Output only. A confidence estimate between 0.0 and 1.0. A higher value - // means greater confidence in correctness of the annotation. - float score = 1; -} - -// Model evaluation metrics for text extraction problems. -message TextExtractionEvaluationMetrics { - // Metrics for a single confidence threshold. - message ConfidenceMetricsEntry { - // Output only. The confidence threshold value used to compute the metrics. - // Only annotations with score of at least this threshold are considered to - // be ones the model would return. - float confidence_threshold = 1; - - // Output only. Recall under the given confidence threshold. - float recall = 3; - - // Output only. Precision under the given confidence threshold. - float precision = 4; - - // Output only. The harmonic mean of recall and precision. - float f1_score = 5; - } - - // Output only. The Area under precision recall curve metric. - float au_prc = 1; - - // Output only. Metrics that have confidence thresholds. - // Precision-recall curve can be derived from it. - repeated ConfidenceMetricsEntry confidence_metrics_entries = 2; -} diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py deleted file mode 100644 index ab21cf1e..00000000 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py +++ /dev/null @@ -1,354 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
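
> **NOTE:** The dataset metadata messages from `text.proto` above remain available as types in 2.x. A hedged sketch, assuming package-level re-exports of the message and enum types:

```py
from google.cloud import automl_v1beta1

# Multiclass text classification dataset metadata.
classification_metadata = automl_v1beta1.TextClassificationDatasetMetadata(
    classification_type=automl_v1beta1.ClassificationType.MULTICLASS,
)

# Text sentiment: every ordinal in 0..sentiment_max (here 0..4) must be
# represented in the dataset before a model can be created.
sentiment_metadata = automl_v1beta1.TextSentimentDatasetMetadata(sentiment_max=4)
```
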
-# source: google/cloud/automl_v1beta1/proto/text_extraction.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - text_segment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/text_extraction.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 \x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TEXTEXTRACTIONANNOTATION = _descriptor.Descriptor( - name="TextExtractionAnnotation", - full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="text_segment", - full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.text_segment", - index=0, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="score", - full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.score", - index=1, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="annotation", - full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.annotation", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=172, - serialized_end=293, -) - - -_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY = _descriptor.Descriptor( - name="ConfidenceMetricsEntry", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="confidence_threshold", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.confidence_threshold", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recall", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.recall", - index=1, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="precision", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.precision", - index=2, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="f1_score", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.f1_score", - index=3, - number=5, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=468, - serialized_end=575, -) - -_TEXTEXTRACTIONEVALUATIONMETRICS = _descriptor.Descriptor( - name="TextExtractionEvaluationMetrics", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="au_prc", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.au_prc", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="confidence_metrics_entries", - full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.confidence_metrics_entries", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=296, - serialized_end=575, -) - -_TEXTEXTRACTIONANNOTATION.fields_by_name[ - "text_segment" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2._TEXTSEGMENT -) -_TEXTEXTRACTIONANNOTATION.oneofs_by_name["annotation"].fields.append( - _TEXTEXTRACTIONANNOTATION.fields_by_name["text_segment"] -) -_TEXTEXTRACTIONANNOTATION.fields_by_name[ - "text_segment" -].containing_oneof = _TEXTEXTRACTIONANNOTATION.oneofs_by_name["annotation"] -_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY.containing_type = ( - _TEXTEXTRACTIONEVALUATIONMETRICS -) -_TEXTEXTRACTIONEVALUATIONMETRICS.fields_by_name[ - "confidence_metrics_entries" -].message_type = _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY -DESCRIPTOR.message_types_by_name["TextExtractionAnnotation"] = _TEXTEXTRACTIONANNOTATION -DESCRIPTOR.message_types_by_name[ - "TextExtractionEvaluationMetrics" -] = _TEXTEXTRACTIONEVALUATIONMETRICS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TextExtractionAnnotation = _reflection.GeneratedProtocolMessageType( - "TextExtractionAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", - "__doc__": """Annotation for identifying spans of text. - - Attributes: - annotation: - Required. Text extraction annotations can either be a text - segment or a text relation. - text_segment: - An entity annotation will set this, which is the part of the - original text to which the annotation pertains. - score: - Output only. A confidence estimate between 0.0 and 1.0. A - higher value means greater confidence in correctness of the - annotation. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionAnnotation) - }, -) -_sym_db.RegisterMessage(TextExtractionAnnotation) - -TextExtractionEvaluationMetrics = _reflection.GeneratedProtocolMessageType( - "TextExtractionEvaluationMetrics", - (_message.Message,), - { - "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( - "ConfidenceMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", - "__doc__": """Metrics for a single confidence threshold. - - Attributes: - confidence_threshold: - Output only. The confidence threshold value used to compute - the metrics. Only annotations with score of at least this - threshold are considered to be ones the model would return. - recall: - Output only. Recall under the given confidence threshold. - precision: - Output only. Precision under the given confidence threshold. - f1_score: - Output only. The harmonic mean of recall and precision. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry) - }, - ), - "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, - "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", - "__doc__": """Model evaluation metrics for text extraction problems. - - Attributes: - au_prc: - Output only. The Area under precision recall curve metric. - confidence_metrics_entries: - Output only. Metrics that have confidence thresholds. - Precision-recall curve can be derived from it. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics) - }, -) -_sym_db.RegisterMessage(TextExtractionEvaluationMetrics) -_sym_db.RegisterMessage(TextExtractionEvaluationMetrics.ConfidenceMetricsEntry) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py deleted file mode 100644 index 6d9b725d..00000000 --- a/google/cloud/automl_v1beta1/proto/text_pb2.py +++ /dev/null @@ -1,345 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/text.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/text.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TEXTCLASSIFICATIONDATASETMETADATA = _descriptor.Descriptor( - name="TextClassificationDatasetMetadata", - full_name="google.cloud.automl.v1beta1.TextClassificationDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="classification_type", - full_name="google.cloud.automl.v1beta1.TextClassificationDatasetMetadata.classification_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=163, - serialized_end=276, -) - - -_TEXTCLASSIFICATIONMODELMETADATA = _descriptor.Descriptor( - name="TextClassificationModelMetadata", - full_name="google.cloud.automl.v1beta1.TextClassificationModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="classification_type", - full_name="google.cloud.automl.v1beta1.TextClassificationModelMetadata.classification_type", - index=0, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=278, - serialized_end=389, -) - - -_TEXTEXTRACTIONDATASETMETADATA = _descriptor.Descriptor( - name="TextExtractionDatasetMetadata", - full_name="google.cloud.automl.v1beta1.TextExtractionDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=391, - serialized_end=422, -) - - -_TEXTEXTRACTIONMODELMETADATA = _descriptor.Descriptor( - name="TextExtractionModelMetadata", - full_name="google.cloud.automl.v1beta1.TextExtractionModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=424, - serialized_end=453, -) - - -_TEXTSENTIMENTDATASETMETADATA = _descriptor.Descriptor( - name="TextSentimentDatasetMetadata", - full_name="google.cloud.automl.v1beta1.TextSentimentDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="sentiment_max", - full_name="google.cloud.automl.v1beta1.TextSentimentDatasetMetadata.sentiment_max", - index=0, - 
number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=455, - serialized_end=508, -) - - -_TEXTSENTIMENTMODELMETADATA = _descriptor.Descriptor( - name="TextSentimentModelMetadata", - full_name="google.cloud.automl.v1beta1.TextSentimentModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=510, - serialized_end=538, -) - -_TEXTCLASSIFICATIONDATASETMETADATA.fields_by_name[ - "classification_type" -].enum_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONTYPE -) -_TEXTCLASSIFICATIONMODELMETADATA.fields_by_name[ - "classification_type" -].enum_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONTYPE -) -DESCRIPTOR.message_types_by_name[ - "TextClassificationDatasetMetadata" -] = _TEXTCLASSIFICATIONDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "TextClassificationModelMetadata" -] = _TEXTCLASSIFICATIONMODELMETADATA -DESCRIPTOR.message_types_by_name[ - "TextExtractionDatasetMetadata" -] = _TEXTEXTRACTIONDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "TextExtractionModelMetadata" -] = _TEXTEXTRACTIONMODELMETADATA -DESCRIPTOR.message_types_by_name[ - "TextSentimentDatasetMetadata" -] = _TEXTSENTIMENTDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "TextSentimentModelMetadata" -] = _TEXTSENTIMENTMODELMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TextClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "TextClassificationDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Dataset metadata for classification. - - Attributes: - classification_type: - Required. Type of the classification problem. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationDatasetMetadata) - }, -) -_sym_db.RegisterMessage(TextClassificationDatasetMetadata) - -TextClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( - "TextClassificationModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text classification. - - Attributes: - classification_type: - Output only. Classification type of the dataset used to train - this model. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationModelMetadata) - }, -) -_sym_db.RegisterMessage(TextClassificationModelMetadata) - -TextExtractionDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "TextExtractionDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TEXTEXTRACTIONDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Dataset metadata that is specific to text extraction""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionDatasetMetadata) - }, -) -_sym_db.RegisterMessage(TextExtractionDatasetMetadata) - -TextExtractionModelMetadata = _reflection.GeneratedProtocolMessageType( - "TextExtractionModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TEXTEXTRACTIONMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text extraction.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionModelMetadata) - }, -) -_sym_db.RegisterMessage(TextExtractionModelMetadata) - -TextSentimentDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "TextSentimentDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Dataset metadata for text sentiment. - - Attributes: - sentiment_max: - Required. A sentiment is expressed as an integer ordinal, - where higher value means a more positive sentiment. The range - of sentiments that will be used is between 0 and sentiment_max - (inclusive on both ends), and all the values in the range must - be represented in the dataset before a model can be created. - sentiment_max value must be between 1 and 10 (inclusive). - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentDatasetMetadata) - }, -) -_sym_db.RegisterMessage(TextSentimentDatasetMetadata) - -TextSentimentModelMetadata = _reflection.GeneratedProtocolMessageType( - "TextSentimentModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TEXTSENTIMENTMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text sentiment.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentModelMetadata) - }, -) -_sym_db.RegisterMessage(TextSentimentModelMetadata) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/text_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/text_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/text_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/text_segment.proto b/google/cloud/automl_v1beta1/proto/text_segment.proto deleted file mode 100644 index 94b17d93..00000000 --- a/google/cloud/automl_v1beta1/proto/text_segment.proto +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_outer_classname = "TextSegmentProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding. -message TextSegment { - // Output only. The content of the TextSegment. - string content = 3; - - // Required. Zero-based character index of the first character of the text - // segment (counting characters from the beginning of the text). - int64 start_offset = 1; - - // Required. Zero-based character index of the first character past the end of - // the text segment (counting character from the beginning of the text). - // The character at the end_offset is NOT included in the text segment. - int64 end_offset = 2; -} diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py deleted file mode 100644 index ed5ae997..00000000 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
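
> **NOTE:** `TextSegment`, defined in the proto above, uses half-open character offsets: `start_offset` is inclusive and `end_offset` is exclusive, which matches Python slicing. A small sketch (assuming the type is re-exported from `google.cloud.automl_v1beta1`):

```py
from google.cloud import automl_v1beta1

text = "AutoML Natural Language"

# The segment covers characters [0, 6): "AutoML".
segment = automl_v1beta1.TextSegment(start_offset=0, end_offset=6)
assert text[segment.start_offset : segment.end_offset] == "AutoML"
```
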
-# source: google/cloud/automl_v1beta1/proto/text_segment.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/text_segment.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TextSegmentProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n4google/cloud/automl_v1beta1/proto/text_segment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"H\n\x0bTextSegment\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x12\n\nend_offset\x18\x02 \x01(\x03\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TextSegmentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - - -_TEXTSEGMENT = _descriptor.Descriptor( - name="TextSegment", - full_name="google.cloud.automl.v1beta1.TextSegment", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="content", - full_name="google.cloud.automl.v1beta1.TextSegment.content", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_offset", - full_name="google.cloud.automl.v1beta1.TextSegment.start_offset", - index=1, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_offset", - full_name="google.cloud.automl.v1beta1.TextSegment.end_offset", - index=2, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=115, - serialized_end=187, -) - -DESCRIPTOR.message_types_by_name["TextSegment"] = _TEXTSEGMENT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TextSegment = _reflection.GeneratedProtocolMessageType( - "TextSegment", - (_message.Message,), - { - "DESCRIPTOR": _TEXTSEGMENT, - "__module__": 
"google.cloud.automl_v1beta1.proto.text_segment_pb2", - "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC - encoding. - - Attributes: - content: - Output only. The content of the TextSegment. - start_offset: - Required. Zero-based character index of the first character of - the text segment (counting characters from the beginning of - the text). - end_offset: - Required. Zero-based character index of the first character - past the end of the text segment (counting character from the - beginning of the text). The character at the end_offset is NOT - included in the text segment. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSegment) - }, -) -_sym_db.RegisterMessage(TextSegment) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment.proto b/google/cloud/automl_v1beta1/proto/text_sentiment.proto deleted file mode 100644 index 5444c52b..00000000 --- a/google/cloud/automl_v1beta1/proto/text_sentiment.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_outer_classname = "TextSentimentProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Contains annotation details specific to text sentiment. -message TextSentimentAnnotation { - // Output only. The sentiment with the semantic, as given to the - // [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData] when populating the dataset from which the model used - // for the prediction had been trained. - // The sentiment values are between 0 and - // Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive), - // with higher value meaning more positive sentiment. They are completely - // relative, i.e. 0 means least positive sentiment and sentiment_max means - // the most positive from the sentiments present in the train data. Therefore - // e.g. if train data had only negative sentiment, then sentiment_max, would - // be still negative (although least negative). - // The sentiment shouldn't be confused with "score" or "magnitude" - // from the previous Natural Language Sentiment Analysis API. 
- int32 sentiment = 1; -} - -// Model evaluation metrics for text sentiment problems. -message TextSentimentEvaluationMetrics { - // Output only. Precision. - float precision = 1; - - // Output only. Recall. - float recall = 2; - - // Output only. The harmonic mean of recall and precision. - float f1_score = 3; - - // Output only. Mean absolute error. Only set for the overall model - // evaluation, not for evaluation of a single annotation spec. - float mean_absolute_error = 4; - - // Output only. Mean squared error. Only set for the overall model - // evaluation, not for evaluation of a single annotation spec. - float mean_squared_error = 5; - - // Output only. Linear weighted kappa. Only set for the overall model - // evaluation, not for evaluation of a single annotation spec. - float linear_kappa = 6; - - // Output only. Quadratic weighted kappa. Only set for the overall model - // evaluation, not for evaluation of a single annotation spec. - float quadratic_kappa = 7; - - // Output only. Confusion matrix of the evaluation. - // Only set for the overall model evaluation, not for evaluation of a single - // annotation spec. - ClassificationEvaluationMetrics.ConfusionMatrix confusion_matrix = 8; - - // Output only. The annotation spec ids used for this evaluation. - // Deprecated . - repeated string annotation_spec_id = 9 [deprecated = true]; -} diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py deleted file mode 100644 index 1332660d..00000000 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/text_sentiment.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/text_sentiment.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\022TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/text_sentiment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto",\n\x17TextSentimentAnnotation\x12\x11\n\tsentiment\x18\x01 \x01(\x05"\xc5\x02\n\x1eTextSentimentEvaluationMetrics\x12\x11\n\tprecision\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x03 \x01(\x02\x12\x1b\n\x13mean_absolute_error\x18\x04 \x01(\x02\x12\x1a\n\x12mean_squared_error\x18\x05 \x01(\x02\x12\x14\n\x0clinear_kappa\x18\x06 \x01(\x02\x12\x17\n\x0fquadratic_kappa\x18\x07 \x01(\x02\x12\x66\n\x10\x63onfusion_matrix\x18\x08 
\x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1e\n\x12\x61nnotation_spec_id\x18\t \x03(\tB\x02\x18\x01\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x12TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TEXTSENTIMENTANNOTATION = _descriptor.Descriptor( - name="TextSentimentAnnotation", - full_name="google.cloud.automl.v1beta1.TextSentimentAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="sentiment", - full_name="google.cloud.automl.v1beta1.TextSentimentAnnotation.sentiment", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=173, - serialized_end=217, -) - - -_TEXTSENTIMENTEVALUATIONMETRICS = _descriptor.Descriptor( - name="TextSentimentEvaluationMetrics", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="precision", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.precision", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recall", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.recall", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="f1_score", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.f1_score", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mean_absolute_error", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.mean_absolute_error", - index=3, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mean_squared_error", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.mean_squared_error", - index=4, - number=5, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="linear_kappa", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.linear_kappa", - index=5, - number=6, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="quadratic_kappa", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.quadratic_kappa", - index=6, - number=7, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="confusion_matrix", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.confusion_matrix", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="annotation_spec_id", - full_name="google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics.annotation_spec_id", - index=8, - number=9, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=220, - serialized_end=545, -) - -_TEXTSENTIMENTEVALUATIONMETRICS.fields_by_name[ - "confusion_matrix" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX -) -DESCRIPTOR.message_types_by_name["TextSentimentAnnotation"] = _TEXTSENTIMENTANNOTATION -DESCRIPTOR.message_types_by_name[ - "TextSentimentEvaluationMetrics" -] = _TEXTSENTIMENTEVALUATIONMETRICS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TextSentimentAnnotation = _reflection.GeneratedProtocolMessageType( - "TextSentimentAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", - "__doc__": """Contains annotation details specific to text sentiment. - - Attributes: - sentiment: - Output only. 
The sentiment with the semantic, as given to the - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportD - ata] when populating the dataset from which the model used for - the prediction had been trained. The sentiment values are - between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max - (inclusive), with higher value meaning more positive - sentiment. They are completely relative, i.e. 0 means least - positive sentiment and sentiment_max means the most positive - from the sentiments present in the train data. Therefore - e.g. if train data had only negative sentiment, then - sentiment_max, would be still negative (although least - negative). The sentiment shouldn’t be confused with “score” or - “magnitude” from the previous Natural Language Sentiment - Analysis API. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentAnnotation) - }, -) -_sym_db.RegisterMessage(TextSentimentAnnotation) - -TextSentimentEvaluationMetrics = _reflection.GeneratedProtocolMessageType( - "TextSentimentEvaluationMetrics", - (_message.Message,), - { - "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, - "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", - "__doc__": """Model evaluation metrics for text sentiment problems. - - Attributes: - precision: - Output only. Precision. - recall: - Output only. Recall. - f1_score: - Output only. The harmonic mean of recall and precision. - mean_absolute_error: - Output only. Mean absolute error. Only set for the overall - model evaluation, not for evaluation of a single annotation - spec. - mean_squared_error: - Output only. Mean squared error. Only set for the overall - model evaluation, not for evaluation of a single annotation - spec. - linear_kappa: - Output only. Linear weighted kappa. Only set for the overall - model evaluation, not for evaluation of a single annotation - spec. - quadratic_kappa: - Output only. Quadratic weighted kappa. Only set for the - overall model evaluation, not for evaluation of a single - annotation spec. - confusion_matrix: - Output only. Confusion matrix of the evaluation. Only set for - the overall model evaluation, not for evaluation of a single - annotation spec. - annotation_spec_id: - Output only. The annotation spec ids used for this evaluation. - Deprecated . - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics) - }, -) -_sym_db.RegisterMessage(TextSentimentEvaluationMetrics) - - -DESCRIPTOR._options = None -_TEXTSENTIMENTEVALUATIONMETRICS.fields_by_name["annotation_spec_id"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/translation.proto b/google/cloud/automl_v1beta1/proto/translation.proto deleted file mode 100644 index 8585bd41..00000000 --- a/google/cloud/automl_v1beta1/proto/translation.proto +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
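
> **NOTE:** A hedged sketch of reading the `TextSentimentEvaluationMetrics` fields described above, using the 2.x request-object calling convention; the project, location, and model IDs are placeholders. The kappa and error fields are populated only on the overall model evaluation, which carries an empty `annotation_spec_id`.

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
model_full_id = client.model_path("my-project", "us-central1", "my-model-id")

for evaluation in client.list_model_evaluations(request={"parent": model_full_id}):
    metrics = evaluation.text_sentiment_evaluation_metrics
    if evaluation.annotation_spec_id == "":  # overall model evaluation
        print(metrics.precision, metrics.recall, metrics.linear_kappa)
```
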
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/api/field_behavior.proto"; -import "google/cloud/automl/v1beta1/data_items.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_outer_classname = "TranslationProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Dataset metadata that is specific to translation. -message TranslationDatasetMetadata { - // Required. The BCP-47 language code of the source language. - string source_language_code = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The BCP-47 language code of the target language. - string target_language_code = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Evaluation metrics for the dataset. -message TranslationEvaluationMetrics { - // Output only. BLEU score. - double bleu_score = 1; - - // Output only. BLEU score for base model. - double base_bleu_score = 2; -} - -// Model metadata that is specific to translation. -message TranslationModelMetadata { - // The resource name of the model to use as a baseline to train the custom - // model. If unset, we use the default base model provided by Google - // Translate. Format: - // `projects/{project_id}/locations/{location_id}/models/{model_id}` - string base_model = 1; - - // Output only. Inferred from the dataset. - // The source languge (The BCP-47 language code) that is used for training. - string source_language_code = 2; - - // Output only. The target languge (The BCP-47 language code) that is used for - // training. - string target_language_code = 3; -} - -// Annotation details specific to translation. -message TranslationAnnotation { - // Output only . The translated content. - TextSnippet translated_content = 1; -} diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2.py b/google/cloud/automl_v1beta1/proto/translation_pb2.py deleted file mode 100644 index b5df3e32..00000000 --- a/google/cloud/automl_v1beta1/proto/translation_pb2.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/automl_v1beta1/proto/translation.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.cloud.automl_v1beta1.proto import ( - data_items_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/translation.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TranslationProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/translation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x1cgoogle/api/annotations.proto"b\n\x1aTranslationDatasetMetadata\x12!\n\x14source_language_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12!\n\x14target_language_code\x18\x02 \x01(\tB\x03\xe0\x41\x02"K\n\x1cTranslationEvaluationMetrics\x12\x12\n\nbleu_score\x18\x01 \x01(\x01\x12\x17\n\x0f\x62\x61se_bleu_score\x18\x02 \x01(\x01"j\n\x18TranslationModelMetadata\x12\x12\n\nbase_model\x18\x01 \x01(\t\x12\x1c\n\x14source_language_code\x18\x02 \x01(\t\x12\x1c\n\x14target_language_code\x18\x03 \x01(\t"]\n\x15TranslationAnnotation\x12\x44\n\x12translated_content\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetB\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TranslationProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TRANSLATIONDATASETMETADATA = _descriptor.Descriptor( - name="TranslationDatasetMetadata", - full_name="google.cloud.automl.v1beta1.TranslationDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="source_language_code", - full_name="google.cloud.automl.v1beta1.TranslationDatasetMetadata.source_language_code", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="target_language_code", - full_name="google.cloud.automl.v1beta1.TranslationDatasetMetadata.target_language_code", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=199, - serialized_end=297, -) - - -_TRANSLATIONEVALUATIONMETRICS = _descriptor.Descriptor( - name="TranslationEvaluationMetrics", - full_name="google.cloud.automl.v1beta1.TranslationEvaluationMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="bleu_score", - full_name="google.cloud.automl.v1beta1.TranslationEvaluationMetrics.bleu_score", - index=0, - number=1, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="base_bleu_score", - full_name="google.cloud.automl.v1beta1.TranslationEvaluationMetrics.base_bleu_score", - index=1, - number=2, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=299, - serialized_end=374, -) - - -_TRANSLATIONMODELMETADATA = _descriptor.Descriptor( - name="TranslationModelMetadata", - full_name="google.cloud.automl.v1beta1.TranslationModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="base_model", - full_name="google.cloud.automl.v1beta1.TranslationModelMetadata.base_model", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_language_code", - full_name="google.cloud.automl.v1beta1.TranslationModelMetadata.source_language_code", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="target_language_code", - full_name="google.cloud.automl.v1beta1.TranslationModelMetadata.target_language_code", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - 
is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=376, - serialized_end=482, -) - - -_TRANSLATIONANNOTATION = _descriptor.Descriptor( - name="TranslationAnnotation", - full_name="google.cloud.automl.v1beta1.TranslationAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="translated_content", - full_name="google.cloud.automl.v1beta1.TranslationAnnotation.translated_content", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=484, - serialized_end=577, -) - -_TRANSLATIONANNOTATION.fields_by_name[ - "translated_content" -].message_type = ( - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2._TEXTSNIPPET -) -DESCRIPTOR.message_types_by_name[ - "TranslationDatasetMetadata" -] = _TRANSLATIONDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "TranslationEvaluationMetrics" -] = _TRANSLATIONEVALUATIONMETRICS -DESCRIPTOR.message_types_by_name["TranslationModelMetadata"] = _TRANSLATIONMODELMETADATA -DESCRIPTOR.message_types_by_name["TranslationAnnotation"] = _TRANSLATIONANNOTATION -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TranslationDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "TranslationDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", - "__doc__": """Dataset metadata that is specific to translation. - - Attributes: - source_language_code: - Required. The BCP-47 language code of the source language. - target_language_code: - Required. The BCP-47 language code of the target language. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationDatasetMetadata) - }, -) -_sym_db.RegisterMessage(TranslationDatasetMetadata) - -TranslationEvaluationMetrics = _reflection.GeneratedProtocolMessageType( - "TranslationEvaluationMetrics", - (_message.Message,), - { - "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, - "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", - "__doc__": """Evaluation metrics for the dataset. - - Attributes: - bleu_score: - Output only. BLEU score. - base_bleu_score: - Output only. BLEU score for base model. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationEvaluationMetrics) - }, -) -_sym_db.RegisterMessage(TranslationEvaluationMetrics) - -TranslationModelMetadata = _reflection.GeneratedProtocolMessageType( - "TranslationModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _TRANSLATIONMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", - "__doc__": """Model metadata that is specific to translation. - - Attributes: - base_model: - The resource name of the model to use as a baseline to train - the custom model. If unset, we use the default base model - provided by Google Translate. Format: ``projects/{project_id}/ - locations/{location_id}/models/{model_id}`` - source_language_code: - Output only. Inferred from the dataset. 
The source languge - (The BCP-47 language code) that is used for training. - target_language_code: - Output only. The target languge (The BCP-47 language code) - that is used for training. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationModelMetadata) - }, -) -_sym_db.RegisterMessage(TranslationModelMetadata) - -TranslationAnnotation = _reflection.GeneratedProtocolMessageType( - "TranslationAnnotation", - (_message.Message,), - { - "DESCRIPTOR": _TRANSLATIONANNOTATION, - "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", - "__doc__": """Annotation details specific to translation. - - Attributes: - translated_content: - Output only . The translated content. - """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationAnnotation) - }, -) -_sym_db.RegisterMessage(TranslationAnnotation) - - -DESCRIPTOR._options = None -_TRANSLATIONDATASETMETADATA.fields_by_name["source_language_code"]._options = None -_TRANSLATIONDATASETMETADATA.fields_by_name["target_language_code"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/translation_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/translation_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/proto/video.proto b/google/cloud/automl_v1beta1/proto/video.proto deleted file mode 100644 index 268ae2a8..00000000 --- a/google/cloud/automl_v1beta1/proto/video.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.automl.v1beta1; - -import "google/cloud/automl/v1beta1/classification.proto"; -import "google/api/annotations.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; -option java_multiple_files = true; -option java_outer_classname = "VideoProto"; -option java_package = "com.google.cloud.automl.v1beta1"; -option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1"; -option ruby_package = "Google::Cloud::AutoML::V1beta1"; - -// Dataset metadata specific to video classification. -// All Video Classification datasets are treated as multi label. -message VideoClassificationDatasetMetadata { - -} - -// Dataset metadata specific to video object tracking. -message VideoObjectTrackingDatasetMetadata { - -} - -// Model metadata specific to video classification. -message VideoClassificationModelMetadata { - -} - -// Model metadata specific to video object tracking. 
-message VideoObjectTrackingModelMetadata { - -} diff --git a/google/cloud/automl_v1beta1/proto/video_pb2.py b/google/cloud/automl_v1beta1/proto/video_pb2.py deleted file mode 100644 index b870cb4c..00000000 --- a/google/cloud/automl_v1beta1/proto/video_pb2.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/automl_v1beta1/proto/video.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.automl_v1beta1.proto import ( - classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/automl_v1beta1/proto/video.proto", - package="google.cloud.automl.v1beta1", - syntax="proto3", - serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', - dependencies=[ - google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_VIDEOCLASSIFICATIONDATASETMETADATA = _descriptor.Descriptor( - name="VideoClassificationDatasetMetadata", - full_name="google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=164, - serialized_end=200, -) - - -_VIDEOOBJECTTRACKINGDATASETMETADATA = _descriptor.Descriptor( - name="VideoObjectTrackingDatasetMetadata", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=202, - serialized_end=238, -) - - -_VIDEOCLASSIFICATIONMODELMETADATA = _descriptor.Descriptor( - name="VideoClassificationModelMetadata", - full_name="google.cloud.automl.v1beta1.VideoClassificationModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=240, - serialized_end=274, -) - - -_VIDEOOBJECTTRACKINGMODELMETADATA = _descriptor.Descriptor( - name="VideoObjectTrackingModelMetadata", - full_name="google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=276, - serialized_end=310, -) - -DESCRIPTOR.message_types_by_name[ - "VideoClassificationDatasetMetadata" -] = _VIDEOCLASSIFICATIONDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "VideoObjectTrackingDatasetMetadata" -] = _VIDEOOBJECTTRACKINGDATASETMETADATA -DESCRIPTOR.message_types_by_name[ - "VideoClassificationModelMetadata" -] = _VIDEOCLASSIFICATIONMODELMETADATA -DESCRIPTOR.message_types_by_name[ - "VideoObjectTrackingModelMetadata" -] = _VIDEOOBJECTTRACKINGMODELMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -VideoClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "VideoClassificationDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOCLASSIFICATIONDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Dataset metadata specific to video classification. All Video - Classification datasets are treated as multi label.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata) - }, -) -_sym_db.RegisterMessage(VideoClassificationDatasetMetadata) - -VideoObjectTrackingDatasetMetadata = _reflection.GeneratedProtocolMessageType( - "VideoObjectTrackingDatasetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOOBJECTTRACKINGDATASETMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Dataset metadata specific to video object tracking.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata) - }, -) -_sym_db.RegisterMessage(VideoObjectTrackingDatasetMetadata) - -VideoClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( - "VideoClassificationModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOCLASSIFICATIONMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Model metadata specific to video classification.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationModelMetadata) - }, -) -_sym_db.RegisterMessage(VideoClassificationModelMetadata) - -VideoObjectTrackingModelMetadata = _reflection.GeneratedProtocolMessageType( - "VideoObjectTrackingModelMetadata", - (_message.Message,), - { - "DESCRIPTOR": _VIDEOOBJECTTRACKINGMODELMETADATA, - "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Model metadata specific to video object tracking.""", - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata) - }, -) -_sym_db.RegisterMessage(VideoObjectTrackingModelMetadata) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/video_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/video_pb2_grpc.py deleted file mode 100644 index 07cb78fe..00000000 --- a/google/cloud/automl_v1beta1/proto/video_pb2_grpc.py +++ 
/dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/google/cloud/automl_v1beta1/py.typed b/google/cloud/automl_v1beta1/py.typed new file mode 100644 index 00000000..0560ba18 --- /dev/null +++ b/google/cloud/automl_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-automl package uses inline types. diff --git a/google/cloud/automl_v1beta1/services/__init__.py b/google/cloud/automl_v1beta1/services/__init__.py new file mode 100644 index 00000000..42ffdf2b --- /dev/null +++ b/google/cloud/automl_v1beta1/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/automl_v1beta1/services/auto_ml/__init__.py b/google/cloud/automl_v1beta1/services/auto_ml/__init__.py new file mode 100644 index 00000000..3324f01a --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import AutoMlClient +from .async_client import AutoMlAsyncClient + +__all__ = ( + "AutoMlClient", + "AutoMlAsyncClient", +) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py new file mode 100644 index 00000000..5c6d1f5b --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py @@ -0,0 +1,2358 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1beta1.services.auto_ml import pagers +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import classification +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import data_types +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import regression +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .client import AutoMlClient + + +class AutoMlAsyncClient: + """AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. 
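+
+    A minimal usage sketch (hypothetical project and dataset IDs; assumes
+    application-default credentials are available in the environment)::
+
+        client = AutoMlAsyncClient()
+        dataset = await client.get_dataset(
+            name="projects/my-project/locations/us-central1/datasets/TBL123"
+        )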
+ """ + + _client: AutoMlClient + + DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT + + dataset_path = staticmethod(AutoMlClient.dataset_path) + + model_path = staticmethod(AutoMlClient.model_path) + + column_spec_path = staticmethod(AutoMlClient.column_spec_path) + + table_spec_path = staticmethod(AutoMlClient.table_spec_path) + + from_service_account_file = AutoMlClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(AutoMlClient).get_transport_class, type(AutoMlClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, AutoMlTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the auto ml client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoMlTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = AutoMlClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_dataset( + self, + request: service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Creates a dataset. + + Args: + request (:class:`~.service.CreateDatasetRequest`): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the + project to create the dataset for. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, dataset]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_dataset( + self, + request: service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + Args: + request (:class:`~.service.GetDatasetRequest`): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
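+        # (The default policy below retries transient ServiceUnavailable
+        # and DeadlineExceeded errors with exponential backoff: 0.1s
+        # initial delay, 1.3x multiplier, 60s ceiling.)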
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_datasets( + self, + request: service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists datasets in a project. + + Args: + request (:class:`~.service.ListDatasetsRequest`): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + parent (:class:`str`): + Required. The resource name of the + project from which to list datasets. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatasetsAsyncPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatasetsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
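+        # (For example, callers can iterate with
+        # `async for dataset in response: ...`; further pages are fetched
+        # lazily during iteration.)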
+ return response + + async def update_dataset( + self, + request: service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. + + Args: + request (:class:`~.service.UpdateDatasetRequest`): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset which replaces + the resource on the server. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([dataset]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("dataset.name", request.dataset.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_dataset( + self, + request: service.DeleteDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteDatasetRequest`): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([name]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = service.DeleteDatasetRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_dataset,
+            default_retry=retries.Retry(
+                initial=0.1,
+                maximum=60.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+                ),
+            ),
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def import_data(
+        self,
+        request: service.ImportDataRequest = None,
+        *,
+        name: str = None,
+        input_config: io.InputConfig = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Imports data into a dataset. For Tables this method can only be
+        called on an empty Dataset.
+
+        For Tables:
+
+        -  A
+           [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params]
+           parameter must be explicitly set.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.ImportDataRequest`):
+                The request object. Request message for
+                [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData].
+            name (:class:`str`):
+                Required. Dataset name. Dataset must
+                already exist. All imported annotations
+                and examples will be added.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            input_config (:class:`~.io.InputConfig`):
+                Required. The desired input location
+                and its domain specific semantics, if
+                any.
+                This corresponds to the ``input_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([name, input_config]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = service.ImportDataRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+        if input_config is not None:
+            request.input_config = input_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.import_data,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def export_data(
+        self,
+        request: service.ExportDataRequest = None,
+        *,
+        name: str = None,
+        output_config: io.OutputConfig = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Exports dataset's data to the provided output location. Returns
+        an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.ExportDataRequest`):
+                The request object. Request message for
+                [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData].
+            name (:class:`str`):
+                Required. The resource name of the
+                dataset.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            output_config (:class:`~.io.OutputConfig`):
+                Required. The desired output
+                location.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([name, output_config]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = service.ExportDataRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+        if output_config is not None:
+            request.output_config = output_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.export_data,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_annotation_spec(
+        self,
+        request: service.GetAnnotationSpecRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> annotation_spec.AnnotationSpec:
+        r"""Gets an annotation spec.
+
+        Args:
+            request (:class:`~.service.GetAnnotationSpecRequest`):
+                The request object. Request message for
+                [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec].
+            name (:class:`str`):
+                Required. The resource name of the
+                annotation spec to retrieve.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.annotation_spec.AnnotationSpec:
+                A definition of an annotation spec.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([name]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = service.GetAnnotationSpecRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
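+        # (For this RPC, `name` is the only flattened keyword argument,
+        # mirroring the `google.api.method_signature` annotation on the
+        # service definition.)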
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_table_spec( + self, + request: service.GetTableSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table_spec.TableSpec: + r"""Gets a table spec. + + Args: + request (:class:`~.service.GetTableSpecRequest`): + The request object. Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + name (:class:`str`): + Required. The resource name of the + table spec to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table_spec.TableSpec: + A specification of a relational table. The table's + schema is represented via its child column specs. It is + pre-populated as part of ImportData by schema inference + algorithm, the version of which is a required parameter + of ImportData InputConfig. Note: While working with a + table, at times the schema may be inconsistent with the + data in the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a model. + Used by: + + - Tables + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetTableSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_table_spec, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
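+        # (A `retry` or `timeout` value passed explicitly by the caller
+        # overrides the defaults configured on the wrapped method above.)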
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_table_specs( + self, + request: service.ListTableSpecsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTableSpecsAsyncPager: + r"""Lists table specs in a dataset. + + Args: + request (:class:`~.service.ListTableSpecsRequest`): + The request object. Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + parent (:class:`str`): + Required. The resource name of the + dataset to list table specs from. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListTableSpecsAsyncPager: + Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListTableSpecsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_table_specs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTableSpecsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_table_spec( + self, + request: service.UpdateTableSpecRequest = None, + *, + table_spec: gca_table_spec.TableSpec = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_table_spec.TableSpec: + r"""Updates a table spec. + + Args: + request (:class:`~.service.UpdateTableSpecRequest`): + The request object. Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + table_spec (:class:`~.gca_table_spec.TableSpec`): + Required. 
The table spec which + replaces the resource on the server. + This corresponds to the ``table_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_table_spec.TableSpec: + A specification of a relational table. The table's + schema is represented via its child column specs. It is + pre-populated as part of ImportData by schema inference + algorithm, the version of which is a required parameter + of ImportData InputConfig. Note: While working with a + table, at times the schema may be inconsistent with the + data in the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a model. + Used by: + + - Tables + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([table_spec]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UpdateTableSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_spec is not None: + request.table_spec = table_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_table_spec, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_spec.name", request.table_spec.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_column_spec( + self, + request: service.GetColumnSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> column_spec.ColumnSpec: + r"""Gets a column spec. + + Args: + request (:class:`~.service.GetColumnSpecRequest`): + The request object. Request message for + [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. + name (:class:`str`): + Required. The resource name of the + column spec to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.column_spec.ColumnSpec: + A representation of a column in a relational table. When + listing them, column specs are returned in the same + order in which they were given on import . Used by: + + - Tables + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
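+ # (A hypothetical mixed call such as
+ # `await client.get_column_spec(request=req, name=other_name)` would
+ # therefore raise ValueError before any RPC is attempted.)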
+ if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetColumnSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_column_spec, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_column_specs( + self, + request: service.ListColumnSpecsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListColumnSpecsAsyncPager: + r"""Lists column specs in a table spec. + + Args: + request (:class:`~.service.ListColumnSpecsRequest`): + The request object. Request message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + parent (:class:`str`): + Required. The resource name of the + table spec to list column specs from. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListColumnSpecsAsyncPager: + Response message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListColumnSpecsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_column_specs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
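+ # (Here to_grpc_metadata produces an ("x-goog-request-params",
+ # "parent=...") entry whose value is the URL-encoded resource name.)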
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListColumnSpecsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_column_spec( + self, + request: service.UpdateColumnSpecRequest = None, + *, + column_spec: gca_column_spec.ColumnSpec = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_column_spec.ColumnSpec: + r"""Updates a column spec. + + Args: + request (:class:`~.service.UpdateColumnSpecRequest`): + The request object. Request message for + [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] + column_spec (:class:`~.gca_column_spec.ColumnSpec`): + Required. The column spec which + replaces the resource on the server. + This corresponds to the ``column_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_column_spec.ColumnSpec: + A representation of a column in a relational table. When + listing them, column specs are returned in the same + order in which they were given on import . Used by: + + - Tables + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([column_spec]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UpdateColumnSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if column_spec is not None: + request.column_spec = column_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_column_spec, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("column_spec.name", request.column_spec.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_model( + self, + request: service.CreateModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. 
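+
+ A minimal usage sketch (hypothetical; ``parent`` and ``my_model``
+ are placeholders, not values defined by this library):
+
+ .. code-block:: python
+
+ op = await client.create_model(parent=parent, model=my_model)
+ model = await op.result() # blocks until training completes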
+ + Args: + request (:class:`~.service.CreateModelRequest`): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + parent (:class:`str`): + Required. Resource name of the parent + project where the model is being + created. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`~.gca_model.Model`): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gca_model.Model``: API proto representing a + trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, model]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.CreateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model( + self, + request: service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + Args: + request (:class:`~.service.GetModelRequest`): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + name (:class:`str`): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_models( + self, + request: service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists models. + + Args: + request (:class:`~.service.ListModelsRequest`): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + parent (:class:`str`): + Required. Resource name of the + project, from which to list the models. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelsAsyncPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model( + self, + request: service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteModelRequest`): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + name (:class:`str`): + Required. Resource name of the model + being deleted. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
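+ # The AsyncOperation built below can be awaited until the deletion
+ # finishes, e.g. (hypothetical usage; `model_name` is a placeholder):
+ #
+ # op = await client.delete_model(name=model_name)
+ # await op.result() # resolves to empty.Empty on success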
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=operations.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def deploy_model(
+ self,
+ request: service.DeployModelRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deploys a model. If a model is already deployed, deploying it
+ with the same parameters has no effect. Deploying with different
+ parameters (e.g., changing
+ [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+ will reset the deployment state without pausing the model's
+ availability.
+
+ Only applicable for Text Classification, Image Object Detection,
+ Tables, and Image Segmentation; all other domains manage
+ deployment automatically.
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it
+ completes.
+
+ Args:
+ request (:class:`~.service.DeployModelRequest`):
+ The request object. Request message for
+ [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel].
+ name (:class:`str`):
+ Required. Resource name of the model
+ to deploy.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:``~.empty.Empty``: A generic empty message that
+ you can re-use to avoid defining duplicated empty
+ messages in your APIs. A typical example is to use it as
+ the request or the response type of an API method. For
+ instance:
+
+ ::
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for ``Empty`` is empty JSON
+ object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any([name]):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = service.DeployModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.deploy_model,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
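+ # As above, the raw long-running operation is wrapped so that awaiting
+ # its result (a hypothetical `await op.result()`) returns empty.Empty
+ # once the deployment has completed.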
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_model( + self, + request: service.UndeployModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Args: + request (:class:`~.service.UndeployModelRequest`): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + name (:class:`str`): + Required. Resource name of the model + to undeploy. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. 
+ return response
+
+ async def export_model(
+ self,
+ request: service.ExportModelRequest = None,
+ *,
+ name: str = None,
+ output_config: io.ModelExportOutputConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Exports a trained, "export-able" model to a user-specified
+ Google Cloud Storage location. A model is considered export-able
+ if and only if it has an export format defined for it in
+ [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it
+ completes.
+
+ Args:
+ request (:class:`~.service.ExportModelRequest`):
+ The request object. Request message for
+ [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel].
+ Models need to be enabled for exporting, otherwise an
+ error code will be returned.
+ name (:class:`str`):
+ Required. The resource name of the
+ model to export.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ output_config (:class:`~.io.ModelExportOutputConfig`):
+ Required. The desired output location
+ and configuration.
+ This corresponds to the ``output_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:``~.empty.Empty``: A generic empty message that
+ you can re-use to avoid defining duplicated empty
+ messages in your APIs. A typical example is to use it as
+ the request or the response type of an API method. For
+ instance:
+
+ ::
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for ``Empty`` is empty JSON
+ object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any([name, output_config]):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = service.ExportModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if output_config is not None:
+ request.output_config = output_config
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.export_model,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=operations.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def export_evaluated_examples(
+ self,
+ request: service.ExportEvaluatedExamplesRequest = None,
+ *,
+ name: str = None,
+ output_config: io.ExportEvaluatedExamplesOutputConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Exports examples on which the model was evaluated (i.e. which
+ were in the TEST set of the dataset the model was created from),
+ together with their ground truth annotations and the annotations
+ created (predicted) by the model. The examples, ground truth and
+ predictions are exported in the state they were in at the moment
+ the model was evaluated.
+
+ This export is available only for 30 days after the model
+ evaluation is created.
+
+ Currently only available for Tables.
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it
+ completes.
+
+ Args:
+ request (:class:`~.service.ExportEvaluatedExamplesRequest`):
+ The request object. Request message for
+ [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples].
+ name (:class:`str`):
+ Required. The resource name of the
+ model whose evaluated examples are to be
+ exported.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ output_config (:class:`~.io.ExportEvaluatedExamplesOutputConfig`):
+ Required. The desired output location
+ and configuration.
+ This corresponds to the ``output_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:``~.empty.Empty``: A generic empty message that
+ you can re-use to avoid defining duplicated empty
+ messages in your APIs. A typical example is to use it as
+ the request or the response type of an API method. For
+ instance:
+
+ ::
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for ``Empty`` is empty JSON
+ object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ if request is not None and any([name, output_config]):
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = service.ExportEvaluatedExamplesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if output_config is not None:
+ request.output_config = output_config
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
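+ # (wrap_method only binds the defaults below; a per-call override such
+ # as the hypothetical
+ # `await client.export_evaluated_examples(request=req, timeout=120.0)`
+ # still takes precedence.)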
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_evaluated_examples, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation( + self, + request: service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + Args: + request (:class:`~.service.GetModelEvaluationRequest`): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + name (:class:`str`): + Required. Resource name for the model + evaluation. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_evaluation.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_model_evaluations( + self, + request: service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists model evaluations. + + Args: + request (:class:`~.service.ListModelEvaluationsRequest`): + The request object. 
Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + parent (:class:`str`): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelEvaluationsAsyncPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoMlAsyncClient",) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/client.py b/google/cloud/automl_v1beta1/services/auto_ml/client.py new file mode 100644 index 00000000..9416b524 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/client.py @@ -0,0 +1,2503 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1beta1.services.auto_ml import pagers +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import classification +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import data_types +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import regression +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AutoMlGrpcTransport +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport + + +class AutoMlClientMeta(type): + """Metaclass for the AutoMl client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] + _transport_registry["grpc"] = AutoMlGrpcTransport + _transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[AutoMlTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
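+ # (With the registry above, the hypothetical calls
+ # AutoMlClient.get_transport_class() and
+ # AutoMlClient.get_transport_class("grpc_asyncio") return
+ # AutoMlGrpcTransport and AutoMlGrpcAsyncIOTransport respectively.)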
+ return next(iter(cls._transport_registry.values()))
+
+
+class AutoMlClient(metaclass=AutoMlClientMeta):
+ """AutoML Server API.
+
+ The resource names are assigned by the server. The server never
+ reuses names that it has created after the resources with those
+ names are deleted.
+
+ An ID of a resource is the last element of the item's resource name.
+ For example, for
+ ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
+ the ID of the item is ``{dataset_id}``.
+
+ Currently the only supported ``location_id`` is "us-central1".
+
+ On any input that is documented to expect a string parameter in
+ snake_case or kebab-case, either of those cases is accepted.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "automl.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ AutoMlClient: The constructed client.
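+
+ A hypothetical example (the key file path is an assumption):
+
+ .. code-block:: python
+
+ client = AutoMlClient.from_service_account_file("key.json")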
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def column_spec_path( + project: str, location: str, dataset: str, table_spec: str, column_spec: str, + ) -> str: + """Return a fully-qualified column_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format( + project=project, + location=location, + dataset=dataset, + table_spec=table_spec, + column_spec=column_spec, + ) + + @staticmethod + def parse_column_spec_path(path: str) -> Dict[str, str]: + """Parse a column_spec path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/tableSpecs/(?P.+?)/columnSpecs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str, location: str, dataset: str,) -> str: + """Return a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, + ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str, str]: + """Parse a dataset path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str, location: str, model: str,) -> str: + """Return a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parse a model path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def table_spec_path( + project: str, location: str, dataset: str, table_spec: str, + ) -> str: + """Return a fully-qualified table_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format( + project=project, location=location, dataset=dataset, table_spec=table_spec, + ) + + @staticmethod + def parse_table_spec_path(path: str) -> Dict[str, str]: + """Parse a table_spec path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/tableSpecs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, AutoMlTransport] = None, + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the auto ml client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoMlTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
The ``GOOGLE_API_USE_MTLS``
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint, this is the default value for
+ the environment variable) and "auto" (automatically switch to the
+ default mTLS endpoint if client SSL credentials are present).
+ However, the ``api_endpoint`` property takes precedence if provided.
+ (2) The ``client_cert_source`` property is used to provide client
+ SSL credentials for mutual TLS transport. If not provided, the
+ default SSL credentials will be used if present.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = ClientOptions.from_dict(client_options)
+ if client_options is None:
+ client_options = ClientOptions.ClientOptions()
+
+ if client_options.api_endpoint is None:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never")
+ if use_mtls_env == "never":
+ client_options.api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ has_client_cert_source = (
+ client_options.client_cert_source is not None
+ or mtls.has_default_client_cert_source()
+ )
+ client_options.api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT
+ if has_client_cert_source
+ else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, AutoMlTransport):
+ # transport is an AutoMlTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=client_options.api_endpoint,
+ scopes=client_options.scopes,
+ api_mtls_endpoint=client_options.api_endpoint,
+ client_cert_source=client_options.client_cert_source,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def create_dataset(
+ self,
+ request: service.CreateDatasetRequest = None,
+ *,
+ parent: str = None,
+ dataset: gca_dataset.Dataset = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_dataset.Dataset:
+ r"""Creates a dataset.
+
+ Args:
+ request (:class:`~.service.CreateDatasetRequest`):
+ The request object. Request message for
+ [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset].
+ parent (:class:`str`):
+ Required. The resource name of the
+ project to create the dataset for.
+ This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`~.gca_dataset.Dataset`): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateDatasetRequest): + request = service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_dataset( + self, + request: service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + Args: + request (:class:`~.service.GetDatasetRequest`): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
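+ # The flattened `name` keyword and the `request` object are mutually
+ # exclusive: callers use one style or the other. An illustrative
+ # sketch of the two equivalent call forms (the resource name below is
+ # a placeholder, not a real project):
+ #
+ #     client.get_dataset(name="projects/p/locations/us-central1/datasets/d")
+ #     client.get_dataset(request={"name": "projects/p/locations/us-central1/datasets/d"})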
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetDatasetRequest): + request = service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_datasets( + self, + request: service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists datasets in a project. + + Args: + request (:class:`~.service.ListDatasetsRequest`): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + parent (:class:`str`): + Required. The resource name of the + project from which to list datasets. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatasetsPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListDatasetsRequest): + request = service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. 
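+ # The routing header assembled below is sent as `x-goog-request-params`
+ # metadata, letting the backend route the call by the `parent` resource.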
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListDatasetsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_dataset(
+ self,
+ request: service.UpdateDatasetRequest = None,
+ *,
+ dataset: gca_dataset.Dataset = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_dataset.Dataset:
+ r"""Updates a dataset.
+
+ Args:
+ request (:class:`~.service.UpdateDatasetRequest`):
+ The request object. Request message for
+ [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset]
+ dataset (:class:`~.gca_dataset.Dataset`):
+ Required. The dataset which replaces
+ the resource on the server.
+ This corresponds to the ``dataset`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.gca_dataset.Dataset:
+ A workspace for solving a single,
+ particular machine learning (ML)
+ problem. A workspace contains examples
+ that may be annotated.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([dataset])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a service.UpdateDatasetRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, service.UpdateDatasetRequest):
+ request = service.UpdateDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if dataset is not None:
+ request.dataset = dataset
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_dataset]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("dataset.name", request.dataset.name),)
+ ),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def delete_dataset(
+ self,
+ request: service.DeleteDatasetRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Deletes a dataset and all of its contents. Returns an empty
+ response in the
+ [response][google.longrunning.Operation.response] field when it
+ completes, and ``delete_details`` in the
+ [metadata][google.longrunning.Operation.metadata] field.
+ + Args: + request (:class:`~.service.DeleteDatasetRequest`): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeleteDatasetRequest): + request = service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def import_data( + self, + request: service.ImportDataRequest = None, + *, + name: str = None, + input_config: io.InputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Args: + request (:class:`~.service.ImportDataRequest`): + The request object. 
Request message for + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + name (:class:`str`): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`~.io.InputConfig`): + Required. The desired input location + and its domain specific semantics, if + any. + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ImportDataRequest): + request = service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_data( + self, + request: service.ExportDataRequest = None, + *, + name: str = None, + output_config: io.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. 
+ + Args: + request (:class:`~.service.ExportDataRequest`): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + name (:class:`str`): + Required. The resource name of the + dataset. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`~.io.OutputConfig`): + Required. The desired output + location. + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportDataRequest): + request = service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_annotation_spec( + self, + request: service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + Args: + request (:class:`~.service.GetAnnotationSpecRequest`): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. 
+ name (:class:`str`): + Required. The resource name of the + annotation spec to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.annotation_spec.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetAnnotationSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetAnnotationSpecRequest): + request = service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_table_spec( + self, + request: service.GetTableSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table_spec.TableSpec: + r"""Gets a table spec. + + Args: + request (:class:`~.service.GetTableSpecRequest`): + The request object. Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + name (:class:`str`): + Required. The resource name of the + table spec to retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table_spec.TableSpec: + A specification of a relational table. The table's + schema is represented via its child column specs. It is + pre-populated as part of ImportData by schema inference + algorithm, the version of which is a required parameter + of ImportData InputConfig. Note: While working with a + table, at times the schema may be inconsistent with the + data in the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a model. + Used by: + + - Tables + + """ + # Create or coerce a protobuf request object. 
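+ # Note: `request` may be either a service.GetTableSpecRequest instance
+ # or a plain dict with the same fields; a dict is coerced through the
+ # proto-plus constructor below.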
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetTableSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetTableSpecRequest): + request = service.GetTableSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_table_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_table_specs( + self, + request: service.ListTableSpecsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTableSpecsPager: + r"""Lists table specs in a dataset. + + Args: + request (:class:`~.service.ListTableSpecsRequest`): + The request object. Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + parent (:class:`str`): + Required. The resource name of the + dataset to list table specs from. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListTableSpecsPager: + Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListTableSpecsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListTableSpecsRequest): + request = service.ListTableSpecsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
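+ # `_wrapped_methods` maps each transport method to a version pre-wrapped
+ # with this RPC's default retry and timeout policy, so the per-call
+ # `retry`/`timeout` arguments only override those defaults.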
+ rpc = self._transport._wrapped_methods[self._transport.list_table_specs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTableSpecsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_table_spec( + self, + request: service.UpdateTableSpecRequest = None, + *, + table_spec: gca_table_spec.TableSpec = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_table_spec.TableSpec: + r"""Updates a table spec. + + Args: + request (:class:`~.service.UpdateTableSpecRequest`): + The request object. Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + table_spec (:class:`~.gca_table_spec.TableSpec`): + Required. The table spec which + replaces the resource on the server. + This corresponds to the ``table_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_table_spec.TableSpec: + A specification of a relational table. The table's + schema is represented via its child column specs. It is + pre-populated as part of ImportData by schema inference + algorithm, the version of which is a required parameter + of ImportData InputConfig. Note: While working with a + table, at times the schema may be inconsistent with the + data in the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a model. + Used by: + + - Tables + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_spec]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateTableSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateTableSpecRequest): + request = service.UpdateTableSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_spec is not None: + request.table_spec = table_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_table_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_spec.name", request.table_spec.name),) + ), + ) + + # Send the request. 
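+ # The server replaces the resource named by `table_spec.name`. An
+ # illustrative read-modify-write sketch (`spec_name` and the new value
+ # are placeholders, not part of this library):
+ #
+ #     spec = client.get_table_spec(name=spec_name)
+ #     spec.time_column_spec_id = new_time_column_spec_id
+ #     spec = client.update_table_spec(table_spec=spec)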
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def get_column_spec(
+ self,
+ request: service.GetColumnSpecRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> column_spec.ColumnSpec:
+ r"""Gets a column spec.
+
+ Args:
+ request (:class:`~.service.GetColumnSpecRequest`):
+ The request object. Request message for
+ [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec].
+ name (:class:`str`):
+ Required. The resource name of the
+ column spec to retrieve.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.column_spec.ColumnSpec:
+ A representation of a column in a relational table. When
+ listing them, column specs are returned in the same
+ order in which they were given on import. Used by:
+
+ - Tables
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a service.GetColumnSpecRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, service.GetColumnSpecRequest):
+ request = service.GetColumnSpecRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_column_spec]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_column_specs(
+ self,
+ request: service.ListColumnSpecsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListColumnSpecsPager:
+ r"""Lists column specs in a table spec.
+
+ Args:
+ request (:class:`~.service.ListColumnSpecsRequest`):
+ The request object. Request message for
+ [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
+ parent (:class:`str`):
+ Required. The resource name of the
+ table spec to list column specs from.
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.pagers.ListColumnSpecsPager:
+ Response message for
+ [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a service.ListColumnSpecsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, service.ListColumnSpecsRequest):
+ request = service.ListColumnSpecsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_column_specs]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListColumnSpecsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_column_spec(
+ self,
+ request: service.UpdateColumnSpecRequest = None,
+ *,
+ column_spec: gca_column_spec.ColumnSpec = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_column_spec.ColumnSpec:
+ r"""Updates a column spec.
+
+ Args:
+ request (:class:`~.service.UpdateColumnSpecRequest`):
+ The request object. Request message for
+ [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec]
+ column_spec (:class:`~.gca_column_spec.ColumnSpec`):
+ Required. The column spec which
+ replaces the resource on the server.
+ This corresponds to the ``column_spec`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.gca_column_spec.ColumnSpec:
+ A representation of a column in a relational table. When
+ listing them, column specs are returned in the same
+ order in which they were given on import. Used by:
+
+ - Tables
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
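+ # Only `column_spec` is exposed as a flattened argument; other request
+ # fields (e.g. the optional ``update_mask`` field mask) must be supplied
+ # via a full UpdateColumnSpecRequest.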
+ has_flattened_params = any([column_spec]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateColumnSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateColumnSpecRequest): + request = service.UpdateColumnSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if column_spec is not None: + request.column_spec = column_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_column_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("column_spec.name", request.column_spec.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_model( + self, + request: service.CreateModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Args: + request (:class:`~.service.CreateModelRequest`): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + parent (:class:`str`): + Required. Resource name of the parent + project where the model is being + created. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`~.gca_model.Model`): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gca_model.Model``: API proto representing a + trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
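+ # create_model is long-running: the call below ultimately returns an
+ # operation future (see operation.from_gapic at the end of this method).
+ # A caller typically blocks on it, e.g.
+ # `model = client.create_model(parent=parent, model=model).result()`
+ # (variable names are placeholders).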
+ if not isinstance(request, service.CreateModelRequest): + request = service.CreateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_model( + self, + request: service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + Args: + request (:class:`~.service.GetModelRequest`): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + name (:class:`str`): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetModelRequest): + request = service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
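+ # Unlike the long-running calls in this class, get_model completes
+ # synchronously and returns the Model proto directly rather than an
+ # operation future.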
+ return response + + def list_models( + self, + request: service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists models. + + Args: + request (:class:`~.service.ListModelsRequest`): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + parent (:class:`str`): + Required. Resource name of the + project, from which to list the models. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelsPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelsRequest): + request = service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model( + self, + request: service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Args: + request (:class:`~.service.DeleteModelRequest`): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + name (:class:`str`): + Required. Resource name of the model + being deleted. 
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:``~.empty.Empty``: A generic empty message that
+ you can re-use to avoid defining duplicated empty
+ messages in your APIs. A typical example is to use it as
+ the request or the response type of an API method. For
+ instance:
+
+ ::
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for ``Empty`` is empty JSON
+ object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a service.DeleteModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, service.DeleteModelRequest):
+ request = service.DeleteModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=operations.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def deploy_model(
+ self,
+ request: service.DeployModelRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Deploys a model. If a model is already deployed, deploying it
+ with the same parameters has no effect. Deploying with different
+ parameters (e.g., changing
+
+ [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+ will reset the deployment state without pausing the model's
+ availability.
+
+ Only applicable for Text Classification, Image Object Detection,
+ Tables, and Image Segmentation; all other domains manage
+ deployment automatically.
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it
+ completes.
+
+ Args:
+ request (:class:`~.service.DeployModelRequest`):
+ The request object. Request message for
+ [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel].
+ name (:class:`str`): + Required. Resource name of the model + to deploy. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeployModelRequest): + request = service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def undeploy_model( + self, + request: service.UndeployModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Args: + request (:class:`~.service.UndeployModelRequest`): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + name (:class:`str`): + Required. Resource name of the model + to undeploy. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:``~.empty.Empty``: A generic empty message that
+ you can re-use to avoid defining duplicated empty
+ messages in your APIs. A typical example is to use it as
+ the request or the response type of an API method. For
+ instance:
+
+ ::
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for ``Empty`` is empty JSON
+ object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a service.UndeployModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, service.UndeployModelRequest):
+ request = service.UndeployModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.undeploy_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=operations.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def export_model(
+ self,
+ request: service.ExportModelRequest = None,
+ *,
+ name: str = None,
+ output_config: io.ModelExportOutputConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Exports a trained, "export-able" model to a user-specified
+ Google Cloud Storage location. A model is considered export-able
+ if and only if it has an export format defined for it in
+
+ [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
+
+ Returns an empty response in the
+ [response][google.longrunning.Operation.response] field when it
+ completes.
+
+ Args:
+ request (:class:`~.service.ExportModelRequest`):
+ The request object. Request message for
+ [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel].
+ Models need to be enabled for exporting, otherwise an
+ error code will be returned.
+ name (:class:`str`):
+ Required. The resource name of the
+ model to export.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+            output_config (:class:`~.io.ModelExportOutputConfig`):
+                Required. The desired output location
+                and configuration.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, output_config])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.ExportModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.ExportModelRequest):
+            request = service.ExportModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+        if output_config is not None:
+            request.output_config = output_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.export_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
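`export_model` follows the same long-running pattern but also requires a destination. A sketch assuming a writable Cloud Storage bucket (all names are placeholders; the nested dict is coerced into an `io.ModelExportOutputConfig` message by the proto-plus layer):

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

model_name = "projects/my-project/locations/us-central1/models/MODEL_ID"

# Mirrors io.ModelExportOutputConfig with a GcsDestination inside.
output_config = {
    "gcs_destination": {"output_uri_prefix": "gs://my-bucket/model-export/"}
}

op = client.export_model(name=model_name, output_config=output_config)
op.result(timeout=3600)  # Block until the export finishes.
```
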
+    def export_evaluated_examples(
+        self,
+        request: service.ExportEvaluatedExamplesRequest = None,
+        *,
+        name: str = None,
+        output_config: io.ExportEvaluatedExamplesOutputConfig = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Exports examples on which the model was evaluated (i.e. which
+        were in the TEST set of the dataset the model was created from),
+        together with their ground truth annotations and the annotations
+        created (predicted) by the model. The examples, ground truth and
+        predictions are exported in the state they were at the moment
+        the model was evaluated.
+
+        This export is available only for 30 days after the model
+        evaluation is created.
+
+        Currently only available for Tables.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Args:
+            request (:class:`~.service.ExportEvaluatedExamplesRequest`):
+                The request object. Request message for
+                [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples].
+            name (:class:`str`):
+                Required. The resource name of the
+                model whose evaluated examples are to be
+                exported.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            output_config (:class:`~.io.ExportEvaluatedExamplesOutputConfig`):
+                Required. The desired output location
+                and configuration.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+                you can re-use to avoid defining duplicated empty
+                messages in your APIs. A typical example is to use it as
+                the request or the response type of an API method. For
+                instance:
+
+                ::
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, output_config])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.ExportEvaluatedExamplesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.ExportEvaluatedExamplesRequest):
+            request = service.ExportEvaluatedExamplesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+        if output_config is not None:
+            request.output_config = output_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[
+            self._transport.export_evaluated_examples
+        ]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+ return response + + def get_model_evaluation( + self, + request: service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + Args: + request (:class:`~.service.GetModelEvaluationRequest`): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + name (:class:`str`): + Required. Resource name for the model + evaluation. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_evaluation.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetModelEvaluationRequest): + request = service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_model_evaluations( + self, + request: service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsPager: + r"""Lists model evaluations. + + Args: + request (:class:`~.service.ListModelEvaluationsRequest`): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + parent (:class:`str`): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListModelEvaluationsPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelEvaluationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelEvaluationsRequest): + request = service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoMlClient",) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/pagers.py b/google/cloud/automl_v1beta1/services/auto_ml/pagers.py new file mode 100644 index 00000000..60528c89 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/pagers.py @@ -0,0 +1,665 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`~.service.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`~.service.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListDatasetsResponse], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListDatasetsRequest`): + The initial request object. + response (:class:`~.service.ListDatasetsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`~.service.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`~.service.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListDatasetsResponse]], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (:class:`~.service.ListDatasetsRequest`): + The initial request object. + response (:class:`~.service.ListDatasetsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTableSpecsPager: + """A pager for iterating through ``list_table_specs`` requests. + + This class thinly wraps an initial + :class:`~.service.ListTableSpecsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``table_specs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTableSpecs`` requests and continue to iterate + through the ``table_specs`` field on the + corresponding responses. + + All the usual :class:`~.service.ListTableSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListTableSpecsResponse], + request: service.ListTableSpecsRequest, + response: service.ListTableSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListTableSpecsRequest`): + The initial request object. + response (:class:`~.service.ListTableSpecsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListTableSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListTableSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table_spec.TableSpec]: + for page in self.pages: + yield from page.table_specs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTableSpecsAsyncPager: + """A pager for iterating through ``list_table_specs`` requests. + + This class thinly wraps an initial + :class:`~.service.ListTableSpecsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``table_specs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTableSpecs`` requests and continue to iterate + through the ``table_specs`` field on the + corresponding responses. + + All the usual :class:`~.service.ListTableSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListTableSpecsResponse]], + request: service.ListTableSpecsRequest, + response: service.ListTableSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListTableSpecsRequest`): + The initial request object. + response (:class:`~.service.ListTableSpecsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListTableSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListTableSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table_spec.TableSpec]: + async def async_generator(): + async for page in self.pages: + for response in page.table_specs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListColumnSpecsPager: + """A pager for iterating through ``list_column_specs`` requests. + + This class thinly wraps an initial + :class:`~.service.ListColumnSpecsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``column_specs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListColumnSpecs`` requests and continue to iterate + through the ``column_specs`` field on the + corresponding responses. + + All the usual :class:`~.service.ListColumnSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListColumnSpecsResponse], + request: service.ListColumnSpecsRequest, + response: service.ListColumnSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListColumnSpecsRequest`): + The initial request object. + response (:class:`~.service.ListColumnSpecsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListColumnSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListColumnSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[column_spec.ColumnSpec]: + for page in self.pages: + yield from page.column_specs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListColumnSpecsAsyncPager: + """A pager for iterating through ``list_column_specs`` requests. + + This class thinly wraps an initial + :class:`~.service.ListColumnSpecsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``column_specs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListColumnSpecs`` requests and continue to iterate + through the ``column_specs`` field on the + corresponding responses. + + All the usual :class:`~.service.ListColumnSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListColumnSpecsResponse]], + request: service.ListColumnSpecsRequest, + response: service.ListColumnSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListColumnSpecsRequest`): + The initial request object. + response (:class:`~.service.ListColumnSpecsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListColumnSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListColumnSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[column_spec.ColumnSpec]: + async def async_generator(): + async for page in self.pages: + for response in page.column_specs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`~.service.ListModelsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListModelsResponse], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelsRequest`): + The initial request object. + response (:class:`~.service.ListModelsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model.Model]: + for page in self.pages: + yield from page.model + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`~.service.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListModelsResponse]], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelsRequest`): + The initial request object. + response (:class:`~.service.ListModelsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.model: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. + + All the usual :class:`~.service.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., service.ListModelEvaluationsResponse], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelEvaluationsRequest`): + The initial request object. + response (:class:`~.service.ListModelEvaluationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluation + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`~.service.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. 
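+
+    Example (an illustrative sketch; assumes an ``AutoMlAsyncClient`` and a
+    running event loop, with ``parent`` a placeholder model resource name)::
+
+        pager = await client.list_model_evaluations(parent=parent)
+        async for evaluation in pager:
+            print(evaluation.name)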
+ + All the usual :class:`~.service.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.service.ListModelEvaluationsRequest`): + The initial request object. + response (:class:`~.service.ListModelEvaluationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py new file mode 100644 index 00000000..9e5456eb --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoMlTransport +from .grpc import AutoMlGrpcTransport +from .grpc_asyncio import AutoMlGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict()  # type: Dict[str, Type[AutoMlTransport]]
+_transport_registry["grpc"] = AutoMlGrpcTransport
+_transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport
+
+
+__all__ = (
+    "AutoMlTransport",
+    "AutoMlGrpcTransport",
+    "AutoMlGrpcAsyncIOTransport",
+)
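The registry above is how a transport implementation is looked up by its name ("grpc" or "grpc_asyncio"). A constructed transport can also be handed to the client directly; a brief sketch, assuming Application Default Credentials are available and that the client accepts a `transport` argument as generated clients typically do:

```py
from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.services.auto_ml.transports import (
    AutoMlGrpcTransport,
)

# Construct the gRPC transport explicitly, then hand it to the client;
# this is equivalent to the default behavior with the "grpc" registry key.
transport = AutoMlGrpcTransport(host="automl.googleapis.com")
client = automl_v1beta1.AutoMlClient(transport=transport)
```
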
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
new file mode 100644
index 00000000..d50f2201
--- /dev/null
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
@@ -0,0 +1,561 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.automl_v1beta1.types import annotation_spec
+from google.cloud.automl_v1beta1.types import column_spec
+from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec
+from google.cloud.automl_v1beta1.types import dataset
+from google.cloud.automl_v1beta1.types import dataset as gca_dataset
+from google.cloud.automl_v1beta1.types import model
+from google.cloud.automl_v1beta1.types import model_evaluation
+from google.cloud.automl_v1beta1.types import service
+from google.cloud.automl_v1beta1.types import table_spec
+from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class AutoMlTransport(abc.ABC):
+    """Abstract transport class for AutoMl."""
+
+    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.create_dataset: gapic_v1.method.wrap_method(
+                self.create_dataset, default_timeout=5.0, client_info=client_info,
+            ),
+            self.get_dataset: gapic_v1.method.wrap_method(
+                self.get_dataset,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+                    ),
+                ),
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.list_datasets: gapic_v1.method.wrap_method(
+                self.list_datasets,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+                    ),
+                ),
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.update_dataset: gapic_v1.method.wrap_method(
+                self.update_dataset, default_timeout=5.0, client_info=client_info,
+            ),
+            self.delete_dataset: gapic_v1.method.wrap_method(
+                self.delete_dataset,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+                    ),
+                ),
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.import_data: gapic_v1.method.wrap_method(
+                self.import_data, default_timeout=5.0, client_info=client_info,
+            ),
+            self.export_data: gapic_v1.method.wrap_method(
+                self.export_data, default_timeout=5.0, client_info=client_info,
+            ),
+            self.get_annotation_spec: gapic_v1.method.wrap_method(
+                self.get_annotation_spec,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+                    ),
+                ),
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.get_table_spec: gapic_v1.method.wrap_method(
+                self.get_table_spec,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+                    ),
+                ),
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.list_table_specs: 
gapic_v1.method.wrap_method( + self.list_table_specs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_table_spec: gapic_v1.method.wrap_method( + self.update_table_spec, default_timeout=5.0, client_info=client_info, + ), + self.get_column_spec: gapic_v1.method.wrap_method( + self.get_column_spec, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_column_specs: gapic_v1.method.wrap_method( + self.list_column_specs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_column_spec: gapic_v1.method.wrap_method( + self.update_column_spec, default_timeout=5.0, client_info=client_info, + ), + self.create_model: gapic_v1.method.wrap_method( + self.create_model, default_timeout=5.0, client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, default_timeout=5.0, client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, default_timeout=5.0, client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, default_timeout=5.0, client_info=client_info, + ), + self.export_evaluated_examples: gapic_v1.method.wrap_method( + self.export_evaluated_examples, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset( + self, + ) -> typing.Callable[ + 
[service.CreateDatasetRequest], + typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]], + ]: + raise NotImplementedError() + + @property + def get_dataset( + self, + ) -> typing.Callable[ + [service.GetDatasetRequest], + typing.Union[dataset.Dataset, typing.Awaitable[dataset.Dataset]], + ]: + raise NotImplementedError() + + @property + def list_datasets( + self, + ) -> typing.Callable[ + [service.ListDatasetsRequest], + typing.Union[ + service.ListDatasetsResponse, typing.Awaitable[service.ListDatasetsResponse] + ], + ]: + raise NotImplementedError() + + @property + def update_dataset( + self, + ) -> typing.Callable[ + [service.UpdateDatasetRequest], + typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]], + ]: + raise NotImplementedError() + + @property + def delete_dataset( + self, + ) -> typing.Callable[ + [service.DeleteDatasetRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def import_data( + self, + ) -> typing.Callable[ + [service.ImportDataRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def export_data( + self, + ) -> typing.Callable[ + [service.ExportDataRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_annotation_spec( + self, + ) -> typing.Callable[ + [service.GetAnnotationSpecRequest], + typing.Union[ + annotation_spec.AnnotationSpec, + typing.Awaitable[annotation_spec.AnnotationSpec], + ], + ]: + raise NotImplementedError() + + @property + def get_table_spec( + self, + ) -> typing.Callable[ + [service.GetTableSpecRequest], + typing.Union[table_spec.TableSpec, typing.Awaitable[table_spec.TableSpec]], + ]: + raise NotImplementedError() + + @property + def list_table_specs( + self, + ) -> typing.Callable[ + [service.ListTableSpecsRequest], + typing.Union[ + service.ListTableSpecsResponse, + typing.Awaitable[service.ListTableSpecsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_table_spec( + self, + ) -> typing.Callable[ + [service.UpdateTableSpecRequest], + typing.Union[ + gca_table_spec.TableSpec, typing.Awaitable[gca_table_spec.TableSpec] + ], + ]: + raise NotImplementedError() + + @property + def get_column_spec( + self, + ) -> typing.Callable[ + [service.GetColumnSpecRequest], + typing.Union[column_spec.ColumnSpec, typing.Awaitable[column_spec.ColumnSpec]], + ]: + raise NotImplementedError() + + @property + def list_column_specs( + self, + ) -> typing.Callable[ + [service.ListColumnSpecsRequest], + typing.Union[ + service.ListColumnSpecsResponse, + typing.Awaitable[service.ListColumnSpecsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_column_spec( + self, + ) -> typing.Callable[ + [service.UpdateColumnSpecRequest], + typing.Union[ + gca_column_spec.ColumnSpec, typing.Awaitable[gca_column_spec.ColumnSpec] + ], + ]: + raise NotImplementedError() + + @property + def create_model( + self, + ) -> typing.Callable[ + [service.CreateModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_model( + self, + ) -> typing.Callable[ + [service.GetModelRequest], + typing.Union[model.Model, typing.Awaitable[model.Model]], + ]: + raise NotImplementedError() + + @property + def list_models( + self, + ) -> typing.Callable[ + 
[service.ListModelsRequest], + typing.Union[ + service.ListModelsResponse, typing.Awaitable[service.ListModelsResponse] + ], + ]: + raise NotImplementedError() + + @property + def delete_model( + self, + ) -> typing.Callable[ + [service.DeleteModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def deploy_model( + self, + ) -> typing.Callable[ + [service.DeployModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def undeploy_model( + self, + ) -> typing.Callable[ + [service.UndeployModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def export_model( + self, + ) -> typing.Callable[ + [service.ExportModelRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def export_evaluated_examples( + self, + ) -> typing.Callable[ + [service.ExportEvaluatedExamplesRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_model_evaluation( + self, + ) -> typing.Callable[ + [service.GetModelEvaluationRequest], + typing.Union[ + model_evaluation.ModelEvaluation, + typing.Awaitable[model_evaluation.ModelEvaluation], + ], + ]: + raise NotImplementedError() + + @property + def list_model_evaluations( + self, + ) -> typing.Callable[ + [service.ListModelEvaluationsRequest], + typing.Union[ + service.ListModelEvaluationsResponse, + typing.Awaitable[service.ListModelEvaluationsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("AutoMlTransport",) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py new file mode 100644 index 00000000..a3c183e4 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py @@ -0,0 +1,947 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO + + +class AutoMlGrpcTransport(AutoMlTransport): + """gRPC backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "automl.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. 
If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
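Because a caller-supplied `channel` bypasses the credential and mTLS logic above, the transport can be pointed at a prebuilt channel, for example one produced by `create_channel` (defined next). A sketch, again assuming Application Default Credentials:

```py
from google.cloud.automl_v1beta1.services.auto_ml.transports import (
    AutoMlGrpcTransport,
)

# Build the channel first; the transport then uses it verbatim and skips
# its own credential resolution.
channel = AutoMlGrpcTransport.create_channel("automl.googleapis.com:443")
transport = AutoMlGrpcTransport(channel=channel)
```
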
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_dataset( + self, + ) -> Callable[[service.CreateDatasetRequest], gca_dataset.Dataset]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_dataset" not in self._stubs: + self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/CreateDataset", + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs["create_dataset"] + + @property + def get_dataset(self) -> Callable[[service.GetDatasetRequest], dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_dataset" not in self._stubs: + self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetDataset", + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs["get_dataset"] + + @property + def list_datasets( + self, + ) -> Callable[[service.ListDatasetsRequest], service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. 
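+
+        Example (an illustrative sketch, not generated code; ``transport``
+        and ``parent`` are hypothetical)::
+
+            rpc = transport.list_datasets
+            response = rpc(service.ListDatasetsRequest(parent=parent))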
+ + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_datasets" not in self._stubs: + self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListDatasets", + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs["list_datasets"] + + @property + def update_dataset( + self, + ) -> Callable[[service.UpdateDatasetRequest], gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_dataset" not in self._stubs: + self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UpdateDataset", + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs["update_dataset"] + + @property + def delete_dataset( + self, + ) -> Callable[[service.DeleteDatasetRequest], operations.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_dataset" not in self._stubs: + self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/DeleteDataset", + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_dataset"] + + @property + def import_data( + self, + ) -> Callable[[service.ImportDataRequest], operations.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
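+        # An illustrative (hypothetical) use of the cached stub: the first
+        # access to this property creates it; subsequent accesses reuse the
+        # entry cached in ``self._stubs``.
+        #
+        #   rpc = transport.import_data
+        #   operation = rpc(service.ImportDataRequest(name=dataset_name))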
+ if "import_data" not in self._stubs: + self._stubs["import_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ImportData", + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["import_data"] + + @property + def export_data( + self, + ) -> Callable[[service.ExportDataRequest], operations.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_data" not in self._stubs: + self._stubs["export_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ExportData", + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_data"] + + @property + def get_annotation_spec( + self, + ) -> Callable[[service.GetAnnotationSpecRequest], annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_annotation_spec" not in self._stubs: + self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec", + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs["get_annotation_spec"] + + @property + def get_table_spec( + self, + ) -> Callable[[service.GetTableSpecRequest], table_spec.TableSpec]: + r"""Return a callable for the get table spec method over gRPC. + + Gets a table spec. + + Returns: + Callable[[~.GetTableSpecRequest], + ~.TableSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_table_spec" not in self._stubs: + self._stubs["get_table_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetTableSpec", + request_serializer=service.GetTableSpecRequest.serialize, + response_deserializer=table_spec.TableSpec.deserialize, + ) + return self._stubs["get_table_spec"] + + @property + def list_table_specs( + self, + ) -> Callable[[service.ListTableSpecsRequest], service.ListTableSpecsResponse]: + r"""Return a callable for the list table specs method over gRPC. + + Lists table specs in a dataset. + + Returns: + Callable[[~.ListTableSpecsRequest], + ~.ListTableSpecsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_table_specs" not in self._stubs: + self._stubs["list_table_specs"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs", + request_serializer=service.ListTableSpecsRequest.serialize, + response_deserializer=service.ListTableSpecsResponse.deserialize, + ) + return self._stubs["list_table_specs"] + + @property + def update_table_spec( + self, + ) -> Callable[[service.UpdateTableSpecRequest], gca_table_spec.TableSpec]: + r"""Return a callable for the update table spec method over gRPC. + + Updates a table spec. + + Returns: + Callable[[~.UpdateTableSpecRequest], + ~.TableSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_table_spec" not in self._stubs: + self._stubs["update_table_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec", + request_serializer=service.UpdateTableSpecRequest.serialize, + response_deserializer=gca_table_spec.TableSpec.deserialize, + ) + return self._stubs["update_table_spec"] + + @property + def get_column_spec( + self, + ) -> Callable[[service.GetColumnSpecRequest], column_spec.ColumnSpec]: + r"""Return a callable for the get column spec method over gRPC. + + Gets a column spec. + + Returns: + Callable[[~.GetColumnSpecRequest], + ~.ColumnSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_column_spec" not in self._stubs: + self._stubs["get_column_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec", + request_serializer=service.GetColumnSpecRequest.serialize, + response_deserializer=column_spec.ColumnSpec.deserialize, + ) + return self._stubs["get_column_spec"] + + @property + def list_column_specs( + self, + ) -> Callable[[service.ListColumnSpecsRequest], service.ListColumnSpecsResponse]: + r"""Return a callable for the list column specs method over gRPC. + + Lists column specs in a table spec. + + Returns: + Callable[[~.ListColumnSpecsRequest], + ~.ListColumnSpecsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_column_specs" not in self._stubs: + self._stubs["list_column_specs"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs", + request_serializer=service.ListColumnSpecsRequest.serialize, + response_deserializer=service.ListColumnSpecsResponse.deserialize, + ) + return self._stubs["list_column_specs"] + + @property + def update_column_spec( + self, + ) -> Callable[[service.UpdateColumnSpecRequest], gca_column_spec.ColumnSpec]: + r"""Return a callable for the update column spec method over gRPC. + + Updates a column spec. 
+ + Returns: + Callable[[~.UpdateColumnSpecRequest], + ~.ColumnSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_column_spec" not in self._stubs: + self._stubs["update_column_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec", + request_serializer=service.UpdateColumnSpecRequest.serialize, + response_deserializer=gca_column_spec.ColumnSpec.deserialize, + ) + return self._stubs["update_column_spec"] + + @property + def create_model( + self, + ) -> Callable[[service.CreateModelRequest], operations.Operation]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model" not in self._stubs: + self._stubs["create_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/CreateModel", + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_model"] + + @property + def get_model(self) -> Callable[[service.GetModelRequest], model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetModel", + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[[service.ListModelsRequest], service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists models. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
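+        # ListModels is a unary-unary RPC (a single request message in, a
+        # single response message out), so the stub is registered with
+        # ``unary_unary`` rather than one of gRPC's streaming variants.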
+ if "list_models" not in self._stubs: + self._stubs["list_models"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListModels", + request_serializer=service.ListModelsRequest.serialize, + response_deserializer=service.ListModelsResponse.deserialize, + ) + return self._stubs["list_models"] + + @property + def delete_model( + self, + ) -> Callable[[service.DeleteModelRequest], operations.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model" not in self._stubs: + self._stubs["delete_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/DeleteModel", + request_serializer=service.DeleteModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_model"] + + @property + def deploy_model( + self, + ) -> Callable[[service.DeployModelRequest], operations.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a model. If a model is already deployed, deploying it + with the same parameters has no effect. Deploying with different + parametrs (as e.g. changing + + [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) + will reset the deployment state without pausing the model's + availability. + + Only applicable for Text Classification, Image Object Detection + , Tables, and Image Segmentation; all other domains manage + deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.DeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "deploy_model" not in self._stubs: + self._stubs["deploy_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/DeployModel", + request_serializer=service.DeployModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["deploy_model"] + + @property + def undeploy_model( + self, + ) -> Callable[[service.UndeployModelRequest], operations.Operation]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.UndeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "undeploy_model" not in self._stubs: + self._stubs["undeploy_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UndeployModel", + request_serializer=service.UndeployModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["undeploy_model"] + + @property + def export_model( + self, + ) -> Callable[[service.ExportModelRequest], operations.Operation]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + + [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_model" not in self._stubs: + self._stubs["export_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ExportModel", + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_model"] + + @property + def export_evaluated_examples( + self, + ) -> Callable[[service.ExportEvaluatedExamplesRequest], operations.Operation]: + r"""Return a callable for the export evaluated examples method over gRPC. + + Exports examples on which the model was evaluated (i.e. which + were in the TEST set of the dataset the model was created from), + together with their ground truth annotations and the annotations + created (predicted) by the model. The examples, ground truth and + predictions are exported in the state they were at the moment + the model was evaluated. + + This export is available only for 30 days since the model + evaluation is created. + + Currently only available for Tables. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportEvaluatedExamplesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_evaluated_examples" not in self._stubs: + self._stubs["export_evaluated_examples"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples", + request_serializer=service.ExportEvaluatedExamplesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_evaluated_examples"] + + @property + def get_model_evaluation( + self, + ) -> Callable[ + [service.GetModelEvaluationRequest], model_evaluation.ModelEvaluation + ]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. 
+ + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model_evaluation" not in self._stubs: + self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation", + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["get_model_evaluation"] + + @property + def list_model_evaluations( + self, + ) -> Callable[ + [service.ListModelEvaluationsRequest], service.ListModelEvaluationsResponse + ]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_evaluations" not in self._stubs: + self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations", + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs["list_model_evaluations"] + + +__all__ = ("AutoMlGrpcTransport",) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py new file mode 100644 index 00000000..c8d24dad --- /dev/null +++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py @@ -0,0 +1,957 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.automl_v1beta1.types import annotation_spec
+from google.cloud.automl_v1beta1.types import column_spec
+from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec
+from google.cloud.automl_v1beta1.types import dataset
+from google.cloud.automl_v1beta1.types import dataset as gca_dataset
+from google.cloud.automl_v1beta1.types import model
+from google.cloud.automl_v1beta1.types import model_evaluation
+from google.cloud.automl_v1beta1.types import service
+from google.cloud.automl_v1beta1.types import table_spec
+from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+from .base import AutoMlTransport, DEFAULT_CLIENT_INFO
+from .grpc import AutoMlGrpcTransport
+
+
+class AutoMlGrpcAsyncIOTransport(AutoMlTransport):
+    """gRPC AsyncIO backend transport for AutoMl.
+
+    AutoML Server API.
+
+    The resource names are assigned by the server. The server never
+    reuses names that it has created after the resources with those
+    names are deleted.
+
+    An ID of a resource is the last element of the item's resource name.
+    For example, for
+    ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
+    the ID of the item is ``{dataset_id}``.
+
+    Currently the only supported ``location_id`` is "us-central1".
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # Create a new channel. The provided one is ignored.
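+            # ``host`` already includes a port here (":443" was appended above
+            # when the mTLS endpoint did not name one), and ``ssl_credentials``
+            # carries either the caller-supplied client certificate or
+            # application-default SSL credentials.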
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_dataset( + self, + ) -> Callable[[service.CreateDatasetRequest], Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_dataset" not in self._stubs: + self._stubs["create_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/CreateDataset", + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs["create_dataset"] + + @property + def get_dataset( + self, + ) -> Callable[[service.GetDatasetRequest], Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_dataset" not in self._stubs: + self._stubs["get_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetDataset", + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs["get_dataset"] + + @property + def list_datasets( + self, + ) -> Callable[ + [service.ListDatasetsRequest], Awaitable[service.ListDatasetsResponse] + ]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. 
+ + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_datasets" not in self._stubs: + self._stubs["list_datasets"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListDatasets", + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs["list_datasets"] + + @property + def update_dataset( + self, + ) -> Callable[[service.UpdateDatasetRequest], Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_dataset" not in self._stubs: + self._stubs["update_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UpdateDataset", + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs["update_dataset"] + + @property + def delete_dataset( + self, + ) -> Callable[[service.DeleteDatasetRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_dataset" not in self._stubs: + self._stubs["delete_dataset"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/DeleteDataset", + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_dataset"] + + @property + def import_data( + self, + ) -> Callable[[service.ImportDataRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_data" not in self._stubs: + self._stubs["import_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ImportData", + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["import_data"] + + @property + def export_data( + self, + ) -> Callable[[service.ExportDataRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "export_data" not in self._stubs: + self._stubs["export_data"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ExportData", + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_data"] + + @property + def get_annotation_spec( + self, + ) -> Callable[ + [service.GetAnnotationSpecRequest], Awaitable[annotation_spec.AnnotationSpec] + ]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + Awaitable[~.AnnotationSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_annotation_spec" not in self._stubs: + self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec", + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs["get_annotation_spec"] + + @property + def get_table_spec( + self, + ) -> Callable[[service.GetTableSpecRequest], Awaitable[table_spec.TableSpec]]: + r"""Return a callable for the get table spec method over gRPC. + + Gets a table spec. + + Returns: + Callable[[~.GetTableSpecRequest], + Awaitable[~.TableSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_table_spec" not in self._stubs: + self._stubs["get_table_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetTableSpec", + request_serializer=service.GetTableSpecRequest.serialize, + response_deserializer=table_spec.TableSpec.deserialize, + ) + return self._stubs["get_table_spec"] + + @property + def list_table_specs( + self, + ) -> Callable[ + [service.ListTableSpecsRequest], Awaitable[service.ListTableSpecsResponse] + ]: + r"""Return a callable for the list table specs method over gRPC. + + Lists table specs in a dataset. 
+ + Returns: + Callable[[~.ListTableSpecsRequest], + Awaitable[~.ListTableSpecsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_table_specs" not in self._stubs: + self._stubs["list_table_specs"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs", + request_serializer=service.ListTableSpecsRequest.serialize, + response_deserializer=service.ListTableSpecsResponse.deserialize, + ) + return self._stubs["list_table_specs"] + + @property + def update_table_spec( + self, + ) -> Callable[ + [service.UpdateTableSpecRequest], Awaitable[gca_table_spec.TableSpec] + ]: + r"""Return a callable for the update table spec method over gRPC. + + Updates a table spec. + + Returns: + Callable[[~.UpdateTableSpecRequest], + Awaitable[~.TableSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_table_spec" not in self._stubs: + self._stubs["update_table_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec", + request_serializer=service.UpdateTableSpecRequest.serialize, + response_deserializer=gca_table_spec.TableSpec.deserialize, + ) + return self._stubs["update_table_spec"] + + @property + def get_column_spec( + self, + ) -> Callable[[service.GetColumnSpecRequest], Awaitable[column_spec.ColumnSpec]]: + r"""Return a callable for the get column spec method over gRPC. + + Gets a column spec. + + Returns: + Callable[[~.GetColumnSpecRequest], + Awaitable[~.ColumnSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_column_spec" not in self._stubs: + self._stubs["get_column_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec", + request_serializer=service.GetColumnSpecRequest.serialize, + response_deserializer=column_spec.ColumnSpec.deserialize, + ) + return self._stubs["get_column_spec"] + + @property + def list_column_specs( + self, + ) -> Callable[ + [service.ListColumnSpecsRequest], Awaitable[service.ListColumnSpecsResponse] + ]: + r"""Return a callable for the list column specs method over gRPC. + + Lists column specs in a table spec. + + Returns: + Callable[[~.ListColumnSpecsRequest], + Awaitable[~.ListColumnSpecsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_column_specs" not in self._stubs: + self._stubs["list_column_specs"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs", + request_serializer=service.ListColumnSpecsRequest.serialize, + response_deserializer=service.ListColumnSpecsResponse.deserialize, + ) + return self._stubs["list_column_specs"] + + @property + def update_column_spec( + self, + ) -> Callable[ + [service.UpdateColumnSpecRequest], Awaitable[gca_column_spec.ColumnSpec] + ]: + r"""Return a callable for the update column spec method over gRPC. + + Updates a column spec. + + Returns: + Callable[[~.UpdateColumnSpecRequest], + Awaitable[~.ColumnSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_column_spec" not in self._stubs: + self._stubs["update_column_spec"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec", + request_serializer=service.UpdateColumnSpecRequest.serialize, + response_deserializer=gca_column_spec.ColumnSpec.deserialize, + ) + return self._stubs["update_column_spec"] + + @property + def create_model( + self, + ) -> Callable[[service.CreateModelRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model" not in self._stubs: + self._stubs["create_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/CreateModel", + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_model"] + + @property + def get_model(self) -> Callable[[service.GetModelRequest], Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model" not in self._stubs: + self._stubs["get_model"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetModel", + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs["get_model"] + + @property + def list_models( + self, + ) -> Callable[[service.ListModelsRequest], Awaitable[service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists models. 
+
+        Returns:
+            Callable[[~.ListModelsRequest],
+                    Awaitable[~.ListModelsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_models" not in self._stubs:
+            self._stubs["list_models"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.AutoMl/ListModels",
+                request_serializer=service.ListModelsRequest.serialize,
+                response_deserializer=service.ListModelsResponse.deserialize,
+            )
+        return self._stubs["list_models"]
+
+    @property
+    def delete_model(
+        self,
+    ) -> Callable[[service.DeleteModelRequest], Awaitable[operations.Operation]]:
+        r"""Return a callable for the delete model method over gRPC.
+
+        Deletes a model. Returns ``google.protobuf.Empty`` in the
+        [response][google.longrunning.Operation.response] field when it
+        completes, and ``delete_details`` in the
+        [metadata][google.longrunning.Operation.metadata] field.
+
+        Returns:
+            Callable[[~.DeleteModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_model" not in self._stubs:
+            self._stubs["delete_model"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.AutoMl/DeleteModel",
+                request_serializer=service.DeleteModelRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["delete_model"]
+
+    @property
+    def deploy_model(
+        self,
+    ) -> Callable[[service.DeployModelRequest], Awaitable[operations.Operation]]:
+        r"""Return a callable for the deploy model method over gRPC.
+
+        Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (e.g. changing
+        [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.DeployModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "deploy_model" not in self._stubs:
+            self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.AutoMl/DeployModel",
+                request_serializer=service.DeployModelRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["deploy_model"]
+
+    @property
+    def undeploy_model(
+        self,
+    ) -> Callable[[service.UndeployModelRequest], Awaitable[operations.Operation]]:
+        r"""Return a callable for the undeploy model method over gRPC.
+
+        Undeploys a model. If the model is not deployed, this method has
+        no effect.
+
+        Only applicable for Text Classification, Image Object Detection
+        and Tables; all other domains manage deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.UndeployModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "undeploy_model" not in self._stubs:
+            self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.AutoMl/UndeployModel",
+                request_serializer=service.UndeployModelRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["undeploy_model"]
+
+    @property
+    def export_model(
+        self,
+    ) -> Callable[[service.ExportModelRequest], Awaitable[operations.Operation]]:
+        r"""Return a callable for the export model method over gRPC.
+
+        Exports a trained, "export-able" model to a user-specified
+        Google Cloud Storage location. A model is considered export-able
+        if and only if it has an export format defined for it in
+        [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.ExportModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "export_model" not in self._stubs:
+            self._stubs["export_model"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.AutoMl/ExportModel",
+                request_serializer=service.ExportModelRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["export_model"]
+
+    @property
+    def export_evaluated_examples(
+        self,
+    ) -> Callable[
+        [service.ExportEvaluatedExamplesRequest], Awaitable[operations.Operation]
+    ]:
+        r"""Return a callable for the export evaluated examples method over gRPC.
+
+        Exports examples on which the model was evaluated (i.e., those
+        that were in the TEST set of the dataset the model was created
+        from), together with their ground truth annotations and the
+        annotations created (predicted) by the model. The examples,
+        ground truth and predictions are exported in the state they were
+        in at the moment the model was evaluated.
+
+        This export is available only for 30 days after the model
+        evaluation is created.
+
+        Currently only available for Tables.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.ExportEvaluatedExamplesRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
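+        # Long-running methods such as this one deserialize the raw
+        # ``google.longrunning.Operation`` proto via ``Operation.FromString``;
+        # the higher-level client is then expected to wrap it in an
+        # asynchronous operation future for the caller.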
+ if "export_evaluated_examples" not in self._stubs: + self._stubs["export_evaluated_examples"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples", + request_serializer=service.ExportEvaluatedExamplesRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["export_evaluated_examples"] + + @property + def get_model_evaluation( + self, + ) -> Callable[ + [service.GetModelEvaluationRequest], Awaitable[model_evaluation.ModelEvaluation] + ]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model_evaluation" not in self._stubs: + self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation", + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["get_model_evaluation"] + + @property + def list_model_evaluations( + self, + ) -> Callable[ + [service.ListModelEvaluationsRequest], + Awaitable[service.ListModelEvaluationsResponse], + ]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_evaluations" not in self._stubs: + self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations", + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs["list_model_evaluations"] + + +__all__ = ("AutoMlGrpcAsyncIOTransport",) diff --git a/google/cloud/automl_v1beta1/services/prediction_service/__init__.py b/google/cloud/automl_v1beta1/services/prediction_service/__init__.py new file mode 100644 index 00000000..0c847693 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/prediction_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + "PredictionServiceClient", + "PredictionServiceAsyncClient", +) diff --git a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py new file mode 100644 index 00000000..cd313402 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py @@ -0,0 +1,458 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import prediction_service + +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + """ + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = PredictionServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. 
If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The GOOGLE_API_USE_MTLS
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint, this is the default value for
+                the environment variable) and "auto" (auto switch to the default
+                mTLS endpoint if client SSL credentials are present). However,
+                the ``api_endpoint`` property takes precedence if provided.
+                (2) The ``client_cert_source`` property is used to provide client
+                SSL credentials for mutual TLS transport. If not provided, the
+                default SSL credentials will be used if present.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+
+        self._client = PredictionServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def predict(
+        self,
+        request: prediction_service.PredictRequest = None,
+        *,
+        name: str = None,
+        payload: data_items.ExamplePayload = None,
+        params: Sequence[prediction_service.PredictRequest.ParamsEntry] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> prediction_service.PredictResponse:
+        r"""Perform an online prediction. The prediction result will be
+        directly returned in the response. Available for following ML
+        problems, and their expected request payloads:
+
+        -  Image Classification - Image in .JPEG, .GIF or .PNG format,
+           image_bytes up to 30MB.
+        -  Image Object Detection - Image in .JPEG, .GIF or .PNG format,
+           image_bytes up to 30MB.
+        -  Text Classification - TextSnippet, content up to 60,000
+           characters, UTF-8 encoded.
+        -  Text Extraction - TextSnippet, content up to 30,000
+           characters, UTF-8 NFC encoded.
+        -  Translation - TextSnippet, content up to 25,000 characters,
+           UTF-8 encoded.
+        -  Tables - Row, with column values matching the columns of the
+           model, up to 5MB. Not available for FORECASTING
+
+        [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
+
+        -  Text Sentiment - TextSnippet, content up to 500 characters,
+           UTF-8 encoded.
+
+        Args:
+            request (:class:`~.prediction_service.PredictRequest`):
+                The request object. Request message for
+                [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
+            name (:class:`str`):
+                Required. Name of the model requested
+                to serve the prediction.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            payload (:class:`~.data_items.ExamplePayload`):
+                Required. Payload to perform a
+                prediction on. The payload must match
+                the problem type that the model was
+                trained to solve.
+                This corresponds to the ``payload`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`):
+                Additional domain-specific parameters, any string must
+                be up to 25000 characters long.
+
+                -  For Image Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0.
When the model makes predictions for an image, + it will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Object Detection: ``score_threshold`` - + (float) When Model detects objects on the image, it + will only produce bounding boxes which have at least + this confidence score. Value in 0 to 1 range, default + is 0.5. ``max_bounding_box_count`` - (int64) No more + than this number of bounding boxes will be returned + in the response. Default is 100, the requested value + may be limited by server. + + - For Tables: feature_importance - (boolean) Whether + feature importance should be populated in the + returned TablesAnnotation. The default is false. + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.prediction_service.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, payload, params]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if payload is not None: + request.payload = payload + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def batch_predict( + self, + request: prediction_service.BatchPredictRequest = None, + *, + name: str = None, + input_config: io.BatchPredictInputConfig = None, + output_config: io.BatchPredictOutputConfig = None, + params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. 
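+
+        For example, a caller would typically await the returned operation
+        to get the final result (a sketch; ``model_full_id`` and the config
+        objects are placeholders)::
+
+            operation = await client.batch_predict(
+                name=model_full_id,
+                input_config=input_config,
+                output_config=output_config,
+                params={},
+            )
+            result = await operation.result()
+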
+        Available for following ML problems:
+
+        -  Image Classification
+        -  Image Object Detection
+        -  Video Classification
+        -  Video Object Tracking
+        -  Text Extraction
+        -  Tables
+
+        Args:
+            request (:class:`~.prediction_service.BatchPredictRequest`):
+                The request object. Request message for
+                [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
+            name (:class:`str`):
+                Required. Name of the model requested
+                to serve the batch prediction.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            input_config (:class:`~.io.BatchPredictInputConfig`):
+                Required. The input configuration for
+                batch prediction.
+                This corresponds to the ``input_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            output_config (:class:`~.io.BatchPredictOutputConfig`):
+                Required. The configuration
+                specifying where output predictions
+                should be written.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`):
+                Required. Additional domain-specific parameters for the
+                predictions, any string must be up to 25000 characters
+                long.
+
+                -  For Text Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0. When the model makes predictions for a text
+                   snippet, it will only produce results that have at
+                   least this confidence score. The default is 0.5.
+
+                -  For Image Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0. When the model makes predictions for an image,
+                   it will only produce results that have at least this
+                   confidence score. The default is 0.5.
+
+                -  For Image Object Detection:
+
+                   ``score_threshold`` - (float) When Model detects
+                   objects on the image, it will only produce bounding
+                   boxes which have at least this confidence score.
+                   Value in 0 to 1 range, default is 0.5.
+                   ``max_bounding_box_count`` - (int64) No more than
+                   this number of bounding boxes will be produced per
+                   image. Default is 100, the requested value may be
+                   limited by server.
+
+                -  For Video Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0. When the model makes predictions for a video, it
+                   will only produce results that have at least this
+                   confidence score. The default is 0.5.
+                   ``segment_classification`` - (boolean) Set to true to
+                   request segment-level classification. AutoML Video
+                   Intelligence returns labels and their confidence
+                   scores for the entire segment of the video that user
+                   specified in the request configuration. The default
+                   is "true". ``shot_classification`` - (boolean) Set to
+                   true to request shot-level classification. AutoML
+                   Video Intelligence determines the boundaries for each
+                   camera shot in the entire segment of the video that
+                   user specified in the request configuration. AutoML
+                   Video Intelligence then returns labels and their
+                   confidence scores for each detected shot, along with
+                   the start and end time of the shot. WARNING: Model
+                   evaluation is not done for this classification type,
+                   the quality of it depends on training data, but there
+                   are no metrics provided to describe that quality. The
+                   default is "false". ``1s_interval_classification`` -
+                   (boolean) Set to true to request classification for a
+                   video at one-second intervals.
AutoML Video + Intelligence returns labels and their confidence + scores for each second of the entire segment of the + video that user specified in the request + configuration. WARNING: Model evaluation is not done + for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. The default is + "false". + + - For Tables: + + feature_importance - (boolean) Whether feature + importance should be populated in the returned + TablesAnnotations. The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects + objects on video frames, it will only produce + bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be returned per + frame. Default is 100, the requested value may be + limited by server. ``min_bounding_box_size`` - + (float) Only bounding boxes with shortest edge at + least that long as a relative value of video frame + size will be returned. Value in 0 to 1 range. Default + is 0. + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.prediction_service.BatchPredictResult``: + Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of the + operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, input_config, output_config, params]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.BatchPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
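+        # (operation_async.from_gapic wraps the raw operation in an
+        # AsyncOperation whose ``result()`` coroutine yields a
+        # BatchPredictResult; ``metadata_type`` controls how the operation's
+        # metadata field is deserialized.)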
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("PredictionServiceAsyncClient",) diff --git a/google/cloud/automl_v1beta1/services/prediction_service/client.py b/google/cloud/automl_v1beta1/services/prediction_service/client.py new file mode 100644 index 00000000..81cb0649 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/prediction_service/client.py @@ -0,0 +1,593 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import prediction_service + +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PredictionServiceGrpcTransport +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +class PredictionServiceClientMeta(type): + """Metaclass for the PredictionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[PredictionServiceTransport]] + _transport_registry["grpc"] = PredictionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[PredictionServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
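+        # (For example, ``label="grpc"`` returns PredictionServiceGrpcTransport,
+        # and ``label="grpc_asyncio"`` returns
+        # PredictionServiceGrpcAsyncIOTransport, per the registry above.)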
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
+    """AutoML Prediction API.
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "automl.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PredictionServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    def __init__(
+        self,
+        *,
+        credentials: credentials.Credentials = None,
+        transport: Union[str, PredictionServiceTransport] = None,
+        client_options: ClientOptions = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the prediction service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.PredictionServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The GOOGLE_API_USE_MTLS
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint, this is the default value for
+                the environment variable) and "auto" (auto switch to the default
+                mTLS endpoint if client SSL credentials are present). However,
+                the ``api_endpoint`` property takes precedence if provided.
+ (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def predict( + self, + request: prediction_service.PredictRequest = None, + *, + name: str = None, + payload: data_items.ExamplePayload = None, + params: Sequence[prediction_service.PredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result will be + directly returned in the response. Available for following ML + problems, and their expected request payloads: + + - Image Classification - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Image Object Detection - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Text Classification - TextSnippet, content up to 60,000 + characters, UTF-8 encoded. + - Text Extraction - TextSnippet, content up to 30,000 + characters, UTF-8 NFC encoded. + - Translation - TextSnippet, content up to 25,000 characters, + UTF-8 encoded. 
+        -  Tables - Row, with column values matching the columns of the
+           model, up to 5MB. Not available for FORECASTING
+
+        [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
+
+        -  Text Sentiment - TextSnippet, content up to 500 characters,
+           UTF-8 encoded.
+
+        Args:
+            request (:class:`~.prediction_service.PredictRequest`):
+                The request object. Request message for
+                [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
+            name (:class:`str`):
+                Required. Name of the model requested
+                to serve the prediction.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            payload (:class:`~.data_items.ExamplePayload`):
+                Required. Payload to perform a
+                prediction on. The payload must match
+                the problem type that the model was
+                trained to solve.
+                This corresponds to the ``payload`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`):
+                Additional domain-specific parameters, any string must
+                be up to 25000 characters long.
+
+                -  For Image Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0. When the model makes predictions for an image,
+                   it will only produce results that have at least this
+                   confidence score. The default is 0.5.
+
+                -  For Image Object Detection: ``score_threshold`` -
+                   (float) When Model detects objects on the image, it
+                   will only produce bounding boxes which have at least
+                   this confidence score. Value in 0 to 1 range, default
+                   is 0.5. ``max_bounding_box_count`` - (int64) No more
+                   than this number of bounding boxes will be returned
+                   in the response. Default is 100, the requested value
+                   may be limited by server.
+
+                -  For Tables: feature_importance - (boolean) Whether
+                   feature importance should be populated in the
+                   returned TablesAnnotation. The default is false.
+                This corresponds to the ``params`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.prediction_service.PredictResponse:
+                Response message for
+                [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, payload, params])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a prediction_service.PredictRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, prediction_service.PredictRequest):
+            request = prediction_service.PredictRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
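+        # (Mutual exclusivity with ``request`` was verified above, so these
+        # assignments cannot overwrite fields of a caller-supplied request.)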
+
+        if name is not None:
+            request.name = name
+        if payload is not None:
+            request.payload = payload
+        if params is not None:
+            request.params = params
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.predict]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def batch_predict(
+        self,
+        request: prediction_service.BatchPredictRequest = None,
+        *,
+        name: str = None,
+        input_config: io.BatchPredictInputConfig = None,
+        output_config: io.BatchPredictOutputConfig = None,
+        params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Perform a batch prediction. Unlike the online
+        [Predict][google.cloud.automl.v1beta1.PredictionService.Predict],
+        batch prediction result won't be immediately available in the
+        response. Instead, a long running operation object is returned.
+        User can poll the operation result via
+        [GetOperation][google.longrunning.Operations.GetOperation]
+        method. Once the operation is done,
+        [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult]
+        is returned in the
+        [response][google.longrunning.Operation.response] field.
+        Available for following ML problems:
+
+        -  Image Classification
+        -  Image Object Detection
+        -  Video Classification
+        -  Video Object Tracking
+        -  Text Extraction
+        -  Tables
+
+        Args:
+            request (:class:`~.prediction_service.BatchPredictRequest`):
+                The request object. Request message for
+                [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
+            name (:class:`str`):
+                Required. Name of the model requested
+                to serve the batch prediction.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            input_config (:class:`~.io.BatchPredictInputConfig`):
+                Required. The input configuration for
+                batch prediction.
+                This corresponds to the ``input_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            output_config (:class:`~.io.BatchPredictOutputConfig`):
+                Required. The configuration
+                specifying where output predictions
+                should be written.
+                This corresponds to the ``output_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`):
+                Required. Additional domain-specific parameters for the
+                predictions, any string must be up to 25000 characters
+                long.
+
+                -  For Text Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0. When the model makes predictions for a text
+                   snippet, it will only produce results that have at
+                   least this confidence score. The default is 0.5.
+
+                -  For Image Classification:
+
+                   ``score_threshold`` - (float) A value from 0.0 to
+                   1.0. When the model makes predictions for an image,
+                   it will only produce results that have at least this
+                   confidence score. The default is 0.5.
+ + - For Image Object Detection: + + ``score_threshold`` - (float) When Model detects + objects on the image, it will only produce bounding + boxes which have at least this confidence score. + Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be produced per + image. Default is 100, the requested value may be + limited by server. + + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for a video, it + will only produce results that have at least this + confidence score. The default is 0.5. + ``segment_classification`` - (boolean) Set to true to + request segment-level classification. AutoML Video + Intelligence returns labels and their confidence + scores for the entire segment of the video that user + specified in the request configuration. The default + is "true". ``shot_classification`` - (boolean) Set to + true to request shot-level classification. AutoML + Video Intelligence determines the boundaries for each + camera shot in the entire segment of the video that + user specified in the request configuration. AutoML + Video Intelligence then returns labels and their + confidence scores for each detected shot, along with + the start and end time of the shot. WARNING: Model + evaluation is not done for this classification type, + the quality of it depends on training data, but there + are no metrics provided to describe that quality. The + default is "false". ``1s_interval_classification`` - + (boolean) Set to true to request classification for a + video at one-second intervals. AutoML Video + Intelligence returns labels and their confidence + scores for each second of the entire segment of the + video that user specified in the request + configuration. WARNING: Model evaluation is not done + for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. The default is + "false". + + - For Tables: + + feature_importance - (boolean) Whether feature + importance should be populated in the returned + TablesAnnotations. The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects + objects on video frames, it will only produce + bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be returned per + frame. Default is 100, the requested value may be + limited by server. ``min_bounding_box_size`` - + (float) Only bounding boxes with shortest edge at + least that long as a relative value of video frame + size will be returned. Value in 0 to 1 range. Default + is 0. + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.prediction_service.BatchPredictResult``: + Result of the Batch Predict. 
This message is returned in + [response][google.longrunning.Operation.response] of the + operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config, output_config, params]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.BatchPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.BatchPredictRequest): + request = prediction_service.BatchPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("PredictionServiceClient",) diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py new file mode 100644 index 00000000..7eb32ea8 --- /dev/null +++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import PredictionServiceTransport
+from .grpc import PredictionServiceGrpcTransport
+from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict()  # type: Dict[str, Type[PredictionServiceTransport]]
+_transport_registry["grpc"] = PredictionServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
+
+
+__all__ = (
+    "PredictionServiceTransport",
+    "PredictionServiceGrpcTransport",
+    "PredictionServiceGrpcAsyncIOTransport",
+)
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
new file mode 100644
index 00000000..bb674eca
--- /dev/null
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.automl_v1beta1.types import prediction_service
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class PredictionServiceTransport(abc.ABC):
+    """Abstract transport class for PredictionService."""
+
+    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, default_timeout=60.0, client_info=client_info, + ), + self.batch_predict: gapic_v1.method.wrap_method( + self.batch_predict, default_timeout=60.0, client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def predict( + self, + ) -> typing.Callable[ + [prediction_service.PredictRequest], + typing.Union[ + prediction_service.PredictResponse, + typing.Awaitable[prediction_service.PredictResponse], + ], + ]: + raise NotImplementedError() + + @property + def batch_predict( + self, + ) -> typing.Callable[ + [prediction_service.BatchPredictRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + +__all__ = ("PredictionServiceTransport",) diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py new file mode 100644 index 00000000..9bc30cdd --- /dev/null +++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py @@ -0,0 +1,326 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+
+import grpc  # type: ignore
+
+from google.cloud.automl_v1beta1.types import prediction_service
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class PredictionServiceGrpcTransport(PredictionServiceTransport):
+    """gRPC backend transport for PredictionService.
+
+    AutoML Prediction API.
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # Create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            address (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Sanity check: Only create a new channel if we do not already
+        # have one.
+        if not hasattr(self, "_grpc_channel"):
+            self._grpc_channel = self.create_channel(
+                self._host, credentials=self._credentials,
+            )
+
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if "operations_client" not in self.__dict__:
+            self.__dict__["operations_client"] = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self.__dict__["operations_client"]
+
+    @property
+    def predict(
+        self,
+    ) -> Callable[
+        [prediction_service.PredictRequest], prediction_service.PredictResponse
+    ]:
+        r"""Return a callable for the predict method over gRPC.
+
+        Perform an online prediction. The prediction result will be
+        directly returned in the response. Available for the following ML
+        problems, and their expected request payloads:
+
+        -  Image Classification - Image in .JPEG, .GIF or .PNG format,
+           image_bytes up to 30MB.
+        -  Image Object Detection - Image in .JPEG, .GIF or .PNG format,
+           image_bytes up to 30MB.
+        -  Text Classification - TextSnippet, content up to 60,000
+           characters, UTF-8 encoded.
+        -  Text Extraction - TextSnippet, content up to 30,000
+           characters, UTF-8 NFC encoded.
+        -  Translation - TextSnippet, content up to 25,000 characters,
+           UTF-8 encoded.
+        -  Tables - Row, with column values matching the columns of the
+           model, up to 5MB. Not available for FORECASTING
+           [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
+        -  Text Sentiment - TextSnippet, content up to 500 characters,
+           UTF-8 encoded.
+
+        Returns:
+            Callable[[~.PredictRequest],
+                    ~.PredictResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "predict" not in self._stubs:
+            self._stubs["predict"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.PredictionService/Predict",
+                request_serializer=prediction_service.PredictRequest.serialize,
+                response_deserializer=prediction_service.PredictResponse.deserialize,
+            )
+        return self._stubs["predict"]
+
+    @property
+    def batch_predict(
+        self,
+    ) -> Callable[[prediction_service.BatchPredictRequest], operations.Operation]:
+        r"""Return a callable for the batch predict method over gRPC.
+
+        Perform a batch prediction. Unlike the online
+        [Predict][google.cloud.automl.v1beta1.PredictionService.Predict],
+        the batch prediction result won't be immediately available in the
+        response. Instead, a long running operation object is returned.
+        The user can poll the operation result via the
+        [GetOperation][google.longrunning.Operations.GetOperation]
+        method. Once the operation is done,
+        [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult]
+        is returned in the
+        [response][google.longrunning.Operation.response] field.
+        Available for the following ML problems:
+
+        -  Image Classification
+        -  Image Object Detection
+        -  Video Classification
+        -  Video Object Tracking
+        -  Text Extraction
+        -  Tables
+
+        Returns:
+            Callable[[~.BatchPredictRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
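+        # (The stub is cached in self._stubs, so the underlying channel
+        # method is created only on the first access to this property.)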
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "batch_predict" not in self._stubs:
+            self._stubs["batch_predict"] = self.grpc_channel.unary_unary(
+                "/google.cloud.automl.v1beta1.PredictionService/BatchPredict",
+                request_serializer=prediction_service.BatchPredictRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["batch_predict"]
+
+
+__all__ = ("PredictionServiceGrpcTransport",)
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
new file mode 100644
index 00000000..2c7d9712
--- /dev/null
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.automl_v1beta1.types import prediction_service
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import PredictionServiceGrpcTransport
+
+
+class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport):
+    """gRPC AsyncIO backend transport for PredictionService.
+
+    AutoML Prediction API.
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "automl.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
+                provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
+                callback to provide client SSL certificate bytes and private key
+                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
+                is None.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # Create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+        self._stubs = {}
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Sanity check: Only create a new channel if we do not already
+        # have one.
+        if not hasattr(self, "_grpc_channel"):
+            self._grpc_channel = self.create_channel(
+                self._host, credentials=self._credentials,
+            )
+
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if "operations_client" not in self.__dict__:
+            self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self.__dict__["operations_client"]
+
+    @property
+    def predict(
+        self,
+    ) -> Callable[
+        [prediction_service.PredictRequest],
+        Awaitable[prediction_service.PredictResponse],
+    ]:
+        r"""Return a callable for the predict method over gRPC.
+
+        Perform an online prediction. The prediction result will be
+        directly returned in the response. Available for the following ML
+        problems, and their expected request payloads:
+
+        -  Image Classification - Image in .JPEG, .GIF or .PNG format,
+           image_bytes up to 30MB.
+        -  Image Object Detection - Image in .JPEG, .GIF or .PNG format,
+           image_bytes up to 30MB.
+        -  Text Classification - TextSnippet, content up to 60,000
+           characters, UTF-8 encoded.
+        -  Text Extraction - TextSnippet, content up to 30,000
+           characters, UTF-8 NFC encoded.
+        -  Translation - TextSnippet, content up to 25,000 characters,
+           UTF-8 encoded.
+        -  Tables - Row, with column values matching the columns of the
+           model, up to 5MB. Not available for FORECASTING
+           [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
+        -  Text Sentiment - TextSnippet, content up to 500 characters,
+           UTF-8 encoded.
+
+        Returns:
+            Callable[[~.PredictRequest],
+                    Awaitable[~.PredictResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
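+        # (As in the sync transport, the stub is cached in self._stubs and
+        # created only on first access.)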
+ if "predict" not in self._stubs: + self._stubs["predict"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.PredictionService/Predict", + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs["predict"] + + @property + def batch_predict( + self, + ) -> Callable[ + [prediction_service.BatchPredictRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the batch predict method over gRPC. + + Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML problems: + + - Image Classification + - Image Object Detection + - Video Classification + - Video Object Tracking \* Text Extraction + - Tables + + Returns: + Callable[[~.BatchPredictRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_predict" not in self._stubs: + self._stubs["batch_predict"] = self.grpc_channel.unary_unary( + "/google.cloud.automl.v1beta1.PredictionService/BatchPredict", + request_serializer=prediction_service.BatchPredictRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["batch_predict"] + + +__all__ = ("PredictionServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/automl_v1/gapic/__init__.py b/google/cloud/automl_v1beta1/services/tables/__init__.py similarity index 100% rename from google/cloud/automl_v1/gapic/__init__.py rename to google/cloud/automl_v1beta1/services/tables/__init__.py diff --git a/google/cloud/automl_v1beta1/tables/gcs_client.py b/google/cloud/automl_v1beta1/services/tables/gcs_client.py similarity index 100% rename from google/cloud/automl_v1beta1/tables/gcs_client.py rename to google/cloud/automl_v1beta1/services/tables/gcs_client.py diff --git a/google/cloud/automl_v1beta1/tables/tables_client.py b/google/cloud/automl_v1beta1/services/tables/tables_client.py similarity index 90% rename from google/cloud/automl_v1beta1/tables/tables_client.py rename to google/cloud/automl_v1beta1/services/tables/tables_client.py index f0a1678e..21028a36 100644 --- a/google/cloud/automl_v1beta1/tables/tables_client.py +++ b/google/cloud/automl_v1beta1/services/tables/tables_client.py @@ -16,16 +16,20 @@ """A tables helper for the google.cloud.automl_v1beta1 AutoML API""" -import pkg_resources +import copy import logging +import pkg_resources import six from google.api_core.gapic_v1 import client_info + from google.api_core import exceptions -from google.cloud.automl_v1beta1 import gapic -from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.tables import gcs_client +from google.cloud.automl_v1beta1 import AutoMlClient +from google.cloud.automl_v1beta1 import PredictionServiceClient +from google.cloud.automl_v1beta1.types 
import data_items +from google.cloud.automl_v1beta1.services.tables import gcs_client from google.protobuf import struct_pb2 +import google.cloud.automl_v1beta1 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version @@ -98,13 +102,14 @@ class TablesClient(object): def __init__( self, + *, project=None, region="us-central1", credentials=None, client=None, prediction_client=None, gcs_client=None, - **kwargs + **kwargs, ): """Constructor. @@ -138,17 +143,7 @@ def __init__( default to. Most methods take `region` as an optional parameter, and can override your choice of `region` supplied here. Note, only `us-central1` is supported to-date. - transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): - A transport instance, responsible for actually making the API - calls. The default transport uses the gRPC protocol. This - argument may also be a callable which returns a transport - instance. Callables will be sent the credentials as the first - argument and the default transport class as the second - argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The + credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the @@ -156,11 +151,17 @@ def __init__( This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. + client (Optional[google.automl_v1beta1.AutoMlClient]): An AutoMl Client + to use for requests. + prediction_client (Optional[google.automl_v1beta1.PredictionClient]): A + Prediction Client to use for requests. + gcs_client (Optional[google.automl_v1beta1.GcsClient]): A Storage client + to use for requests. client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. + Custom options for the client. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
""" version = _GAPIC_LIBRARY_VERSION user_agent = "automl-tables-wrapper/{}".format(version) @@ -176,14 +177,14 @@ def __init__( kwargs.pop("client_info", None) if client is None: - self.auto_ml_client = gapic.auto_ml_client.AutoMlClient( + self.auto_ml_client = AutoMlClient( credentials=credentials, client_info=client_info_, **kwargs ) else: self.auto_ml_client = client if prediction_client is None: - self.prediction_client = gapic.prediction_service_client.PredictionServiceClient( + self.prediction_client = PredictionServiceClient( credentials=credentials, client_info=client_info_, **kwargs ) else: @@ -216,7 +217,7 @@ def __lookup_by_display_name(self, object_type, items, display_name): ) ) - def __location_path(self, project=None, region=None): + def __location_path(self, *, project=None, region=None): if project is None: if self.project is None: raise ValueError( @@ -235,7 +236,7 @@ def __location_path(self, project=None, region=None): ) region = self.region - return self.auto_ml_client.location_path(project, region) + return f"projects/{project}/locations/{region}" # the returned metadata object doesn't allow for updating fields, so # we need to manually copy user-updated fields over @@ -250,12 +251,12 @@ def __update_metadata(self, metadata, k, v): def __dataset_from_args( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs ): if dataset is None and dataset_display_name is None and dataset_name is None: raise ValueError( @@ -272,17 +273,16 @@ def __dataset_from_args( dataset_name=dataset_name, project=project, region=region, - **kwargs ) def __model_from_args( self, + *, model=None, model_display_name=None, model_name=None, project=None, region=None, - **kwargs ): if model is None and model_display_name is None and model_name is None: raise ValueError( @@ -298,17 +298,16 @@ def __model_from_args( model_name=model_name, project=project, region=region, - **kwargs ) def __dataset_name_from_args( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs ): if dataset is None and dataset_display_name is None and dataset_name is None: raise ValueError( @@ -322,26 +321,23 @@ def __dataset_name_from_args( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) dataset_name = dataset.name else: # we do this to force a NotFound error when needed - self.get_dataset( - dataset_name=dataset_name, project=project, region=region, **kwargs - ) + self.get_dataset(dataset_name=dataset_name, project=project, region=region) return dataset_name def __table_spec_name_from_args( self, + *, table_spec_index=0, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs ): dataset_name = self.__dataset_name_from_args( dataset=dataset, @@ -349,24 +345,21 @@ def __table_spec_name_from_args( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) - table_specs = [ - t for t in self.list_table_specs(dataset_name=dataset_name, **kwargs) - ] + table_specs = [t for t in self.list_table_specs(dataset_name=dataset_name)] table_spec_full_id = table_specs[table_spec_index].name return table_spec_full_id def __model_name_from_args( self, + *, model=None, model_display_name=None, model_name=None, project=None, region=None, - **kwargs ): if model is None and model_display_name is None and model_name is None: raise ValueError( @@ -379,14 +372,11 @@ def __model_name_from_args( model_display_name=model_display_name, 
             project=project,
             region=region,
-            **kwargs
         )
         model_name = model.name
     else:
         # we do this to force a NotFound error when needed
-        self.get_model(
-            model_name=model_name, project=project, region=region, **kwargs
-        )
+        self.get_model(model_name=model_name, project=project, region=region)
 
         return model_name
 
     def __log_operation_info(self, message, op):
@@ -414,6 +404,7 @@ def __log_operation_info(self, message, op):
 
     def __column_spec_name_from_args(
         self,
+        *,
         dataset=None,
         dataset_display_name=None,
         dataset_name=None,
@@ -423,7 +414,6 @@ def __column_spec_name_from_args(
         column_spec_display_name=None,
         project=None,
         region=None,
-        **kwargs
     ):
         column_specs = self.list_column_specs(
             dataset=dataset,
@@ -433,7 +423,6 @@ def __column_spec_name_from_args(
             table_spec_index=table_spec_index,
             project=project,
             region=region,
-            **kwargs
         )
         if column_spec_display_name is not None:
             column_specs = {s.display_name: s for s in column_specs}
@@ -478,7 +467,30 @@ def __ensure_gcs_client_is_initialized(self, credentials, project):
             project=project, credentials=credentials
         )
 
-    def list_datasets(self, project=None, region=None, **kwargs):
+    def __process_request_kwargs(self, request, **kwargs):
+        """Add request kwargs to the request and return remaining kwargs.
+
+        Some kwargs are for the request object and others are for
+        the method itself (retry, metadata).
+
+        Args:
+            request (proto.Message): The request object.
+
+        Returns:
+            dict: kwargs to be added to the method.
+        """
+
+        method_kwargs = copy.deepcopy(kwargs)
+        for key, value in kwargs.items():
+            try:
+                setattr(request, key, value)
+                method_kwargs.pop(key)
+            except KeyError:
+                continue
+
+        return method_kwargs
+
+    def list_datasets(self, *, project=None, region=None, **kwargs):
         """List all datasets in a particular project and region.
 
         Example:
@@ -520,17 +532,23 @@ def list_datasets(self, project=None, region=None, **kwargs):
             to a retryable error and retry attempts failed.
             ValueError: If required parameters are missing.
         """
-        return self.auto_ml_client.list_datasets(
-            self.__location_path(project=project, region=region), **kwargs
+
+        request = google.cloud.automl_v1beta1.ListDatasetsRequest(
+            parent=self.__location_path(project=project, region=region),
         )
+        method_kwargs = self.__process_request_kwargs(request, **kwargs)
+
+        return self.auto_ml_client.list_datasets(request=request, **method_kwargs)
+
     def get_dataset(
         self,
+        *,
         project=None,
         region=None,
         dataset_name=None,
         dataset_display_name=None,
-        **kwargs
+        **kwargs,
     ):
         """Gets a single dataset in a particular project and region.
 
@@ -583,16 +601,19 @@ def get_dataset(
         )
 
         if dataset_name is not None:
-            return self.auto_ml_client.get_dataset(dataset_name, **kwargs)
+            request = google.cloud.automl_v1beta1.GetDatasetRequest(name=dataset_name,)
+            method_kwargs = self.__process_request_kwargs(request, **kwargs)
+
+            return self.auto_ml_client.get_dataset(request=request, **method_kwargs)
 
         return self.__lookup_by_display_name(
             "dataset",
-            self.list_datasets(project, region, **kwargs),
+            self.list_datasets(project=project, region=region),
             dataset_display_name,
         )
 
     def create_dataset(
-        self, dataset_display_name, metadata={}, project=None, region=None, **kwargs
+        self, dataset_display_name, *, metadata={}, project=None, region=None, **kwargs
     ):
         """Create a dataset. Keep in mind, importing data is a separate step.
 
@@ -630,20 +651,26 @@ def create_dataset(
             to a retryable error and retry attempts failed.
             ValueError: If required parameters are missing.
""" - return self.auto_ml_client.create_dataset( - self.__location_path(project, region), - {"display_name": dataset_display_name, "tables_dataset_metadata": metadata}, - **kwargs + request = google.cloud.automl_v1beta1.CreateDatasetRequest( + parent=self.__location_path(project=project, region=region), + dataset={ + "display_name": dataset_display_name, + "tables_dataset_metadata": metadata, + }, ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + + return self.auto_ml_client.create_dataset(request=request, **method_kwargs) def delete_dataset( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Deletes a dataset. This does not delete any models trained on this dataset. @@ -703,18 +730,20 @@ def delete_dataset( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) # delete is idempotent except exceptions.NotFound: return None - op = self.auto_ml_client.delete_dataset(dataset_name, **kwargs) + request = google.cloud.automl_v1beta1.DeleteDatasetRequest(name=dataset_name,) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + op = self.auto_ml_client.delete_dataset(request=request, **method_kwargs) self.__log_operation_info("Delete dataset", op) return op def import_data( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -724,7 +753,7 @@ def import_data( project=None, region=None, credentials=None, - **kwargs + **kwargs, ): """Imports data into a dataset. @@ -808,7 +837,6 @@ def import_data( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) request = {} @@ -832,12 +860,18 @@ def import_data( "One of 'gcs_input_uris', or 'bigquery_input_uri', or 'pandas_dataframe' must be set." ) - op = self.auto_ml_client.import_data(dataset_name, request, **kwargs) + req = google.cloud.automl_v1beta1.ImportDataRequest( + name=dataset_name, input_config=request + ) + method_kwargs = self.__process_request_kwargs(req, **kwargs) + + op = self.auto_ml_client.import_data(request=req, **method_kwargs) self.__log_operation_info("Data import", op) return op def export_data( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -845,7 +879,7 @@ def export_data( bigquery_output_uri=None, project=None, region=None, - **kwargs + **kwargs, ): """Exports data from a dataset. @@ -915,7 +949,6 @@ def export_data( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) request = {} @@ -928,11 +961,16 @@ def export_data( "One of 'gcs_output_uri_prefix', or 'bigquery_output_uri' must be set." ) - op = self.auto_ml_client.export_data(dataset_name, request, **kwargs) + req = google.cloud.automl_v1beta1.ExportDataRequest( + name=dataset_name, output_config=request + ) + + method_kwargs = self.__process_request_kwargs(req, **kwargs) + op = self.auto_ml_client.export_data(request=req, **method_kwargs) self.__log_operation_info("Export data", op) return op - def get_table_spec(self, table_spec_name, project=None, region=None, **kwargs): + def get_table_spec(self, table_spec_name, *, project=None, region=None, **kwargs): """Gets a single table spec in a particular project and region. Example: @@ -970,16 +1008,20 @@ def get_table_spec(self, table_spec_name, project=None, region=None, **kwargs): to a retryable error and retry attempts failed. ValueError: If required parameters are missing. 
""" - return self.auto_ml_client.get_table_spec(table_spec_name, **kwargs) + request = google.cloud.automl_v1beta1.GetTableSpecRequest(name=table_spec_name,) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + + return self.auto_ml_client.get_table_spec(request=request, **method_kwargs) def list_table_specs( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Lists table specs. @@ -1039,12 +1081,15 @@ def list_table_specs( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) - return self.auto_ml_client.list_table_specs(dataset_name, **kwargs) + request = google.cloud.automl_v1beta1.ListTableSpecsRequest( + parent=dataset_name, + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.list_table_specs(request=request, **method_kwargs) - def get_column_spec(self, column_spec_name, project=None, region=None, **kwargs): + def get_column_spec(self, column_spec_name, *, project=None, region=None, **kwargs): """Gets a single column spec in a particular project and region. Example: @@ -1082,10 +1127,15 @@ def get_column_spec(self, column_spec_name, project=None, region=None, **kwargs) to a retryable error and retry attempts failed. ValueError: If required parameters are missing. """ - return self.auto_ml_client.get_column_spec(column_spec_name, **kwargs) + request = google.cloud.automl_v1beta1.GetColumnSpecRequest( + name=column_spec_name, + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.get_column_spec(request=request, **method_kwargs) def list_column_specs( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -1093,7 +1143,7 @@ def list_column_specs( table_spec_index=0, project=None, region=None, - **kwargs + **kwargs, ): """Lists column specs. @@ -1171,16 +1221,21 @@ def list_column_specs( dataset_name=dataset_name, project=project, region=region, - **kwargs ) ] table_spec_name = table_specs[table_spec_index].name - return self.auto_ml_client.list_column_specs(table_spec_name, **kwargs) + request = google.cloud.automl_v1beta1.ListColumnSpecsRequest( + parent=table_spec_name, + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + + return self.auto_ml_client.list_column_specs(request=request, **method_kwargs) def update_column_spec( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -1192,7 +1247,7 @@ def update_column_spec( nullable=None, project=None, region=None, - **kwargs + **kwargs, ): """Updates a column's specs. 
@@ -1282,7 +1337,6 @@ def update_column_spec( column_spec_display_name=column_spec_display_name, project=project, region=region, - **kwargs ) # type code must always be set @@ -1299,7 +1353,6 @@ def update_column_spec( table_spec_index=table_spec_index, project=project, region=region, - **kwargs ) }[column_spec_name].data_type.type_code @@ -1307,14 +1360,18 @@ def update_column_spec( if nullable is not None: data_type["nullable"] = nullable - data_type["type_code"] = type_code + data_type["type_code"] = google.cloud.automl_v1beta1.TypeCode(type_code) - request = {"name": column_spec_name, "data_type": data_type} + request = google.cloud.automl_v1beta1.UpdateColumnSpecRequest( + column_spec={"name": column_spec_name, "data_type": data_type} + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) - return self.auto_ml_client.update_column_spec(request, **kwargs) + return self.auto_ml_client.update_column_spec(request=request, **method_kwargs) def set_target_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -1324,7 +1381,7 @@ def set_target_column( column_spec_display_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Sets the target column for a given table. @@ -1409,7 +1466,6 @@ def set_target_column( column_spec_display_name=column_spec_display_name, project=project, region=region, - **kwargs ) column_spec_id = column_spec_name.rsplit("/", 1)[-1] @@ -1419,19 +1475,22 @@ def set_target_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) metadata = dataset.tables_dataset_metadata metadata = self.__update_metadata( metadata, "target_column_spec_id", column_spec_id ) - request = {"name": dataset.name, "tables_dataset_metadata": metadata} + request = google.cloud.automl_v1beta1.UpdateDatasetRequest( + dataset={"name": dataset.name, "tables_dataset_metadata": metadata} + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) - return self.auto_ml_client.update_dataset(request, **kwargs) + return self.auto_ml_client.update_dataset(request=request, **method_kwargs) def set_time_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -1441,7 +1500,7 @@ def set_time_column( column_spec_display_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Sets the time column which designates which data will be of type timestamp and will be used for the timeseries data. 
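For comparison, the same dataset update can be issued directly against `AutoMlClient` with a request object, which is what `set_target_column` now builds internally. A sketch with a placeholder dataset name and a hypothetical column spec ID:

```py
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Placeholder dataset resource name.
dataset = client.get_dataset(
    name="projects/my-project/locations/us-central1/datasets/TBL123"
)

metadata = dataset.tables_dataset_metadata
metadata.target_column_spec_id = "456"  # hypothetical column spec ID

request = automl_v1beta1.UpdateDatasetRequest(
    dataset={"name": dataset.name, "tables_dataset_metadata": metadata}
)
updated = client.update_dataset(request=request)
```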
@@ -1524,7 +1583,6 @@ def set_time_column( column_spec_display_name=column_spec_display_name, project=project, region=region, - **kwargs ) column_spec_id = column_spec_name.rsplit("/", 1)[-1] @@ -1534,28 +1592,28 @@ def set_time_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) - table_spec_full_id = self.__table_spec_name_from_args( - dataset_name=dataset_name, **kwargs - ) + table_spec_full_id = self.__table_spec_name_from_args(dataset_name=dataset_name) - my_table_spec = { - "name": table_spec_full_id, - "time_column_spec_id": column_spec_id, - } - - return self.auto_ml_client.update_table_spec(my_table_spec, **kwargs) + request = google.cloud.automl_v1beta1.UpdateTableSpecRequest( + table_spec={ + "name": table_spec_full_id, + "time_column_spec_id": column_spec_id, + } + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.update_table_spec(request=request, **method_kwargs) def clear_time_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Clears the time column which designates which data will be of type timestamp and will be used for the timeseries data. @@ -1617,19 +1675,21 @@ def clear_time_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) - table_spec_full_id = self.__table_spec_name_from_args( - dataset_name=dataset_name, **kwargs - ) + table_spec_full_id = self.__table_spec_name_from_args(dataset_name=dataset_name) my_table_spec = {"name": table_spec_full_id, "time_column_spec_id": None} - return self.auto_ml_client.update_table_spec(my_table_spec, **kwargs) + request = google.cloud.automl_v1beta1.UpdateTableSpecRequest( + table_spec=my_table_spec + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.update_table_spec(request=request, **method_kwargs) def set_weight_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -1639,7 +1699,7 @@ def set_weight_column( column_spec_display_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Sets the weight column for a given table. @@ -1724,7 +1784,6 @@ def set_weight_column( column_spec_display_name=column_spec_display_name, project=project, region=region, - **kwargs ) column_spec_id = column_spec_name.rsplit("/", 1)[-1] @@ -1734,25 +1793,29 @@ def set_weight_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) metadata = dataset.tables_dataset_metadata metadata = self.__update_metadata( metadata, "weight_column_spec_id", column_spec_id ) - request = {"name": dataset.name, "tables_dataset_metadata": metadata} + request = google.cloud.automl_v1beta1.UpdateDatasetRequest( + dataset={"name": dataset.name, "tables_dataset_metadata": metadata} + ) + + method_kwargs = self.__process_request_kwargs(request, **kwargs) - return self.auto_ml_client.update_dataset(request, **kwargs) + return self.auto_ml_client.update_dataset(request=request, **method_kwargs) def clear_weight_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Clears the weight column for a given dataset. 
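As throughout this migration, the optional arguments above are now keyword-only. A usage sketch (project and display names are placeholders), importing from the module path introduced by the rename earlier in this diff:

```py
from google.cloud.automl_v1beta1.services.tables import tables_client

client = tables_client.TablesClient(project="my-project", region="us-central1")

# Optional arguments can no longer be passed positionally; each must be named.
client.set_weight_column(
    dataset_display_name="my_dataset",
    column_spec_display_name="weight",
)
client.clear_weight_column(dataset_display_name="my_dataset")
```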
@@ -1815,17 +1878,20 @@ def clear_weight_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) metadata = dataset.tables_dataset_metadata metadata = self.__update_metadata(metadata, "weight_column_spec_id", None) - request = {"name": dataset.name, "tables_dataset_metadata": metadata} + request = google.cloud.automl_v1beta1.UpdateDatasetRequest( + dataset={"name": dataset.name, "tables_dataset_metadata": metadata} + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) - return self.auto_ml_client.update_dataset(request, **kwargs) + return self.auto_ml_client.update_dataset(request=request, **method_kwargs) def set_test_train_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -1835,7 +1901,7 @@ def set_test_train_column( column_spec_display_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Sets the test/train (ml_use) column which designates which data belongs to the test and train sets. This column must be categorical. @@ -1921,7 +1987,6 @@ def set_test_train_column( column_spec_display_name=column_spec_display_name, project=project, region=region, - **kwargs ) column_spec_id = column_spec_name.rsplit("/", 1)[-1] @@ -1931,25 +1996,28 @@ def set_test_train_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) metadata = dataset.tables_dataset_metadata metadata = self.__update_metadata( metadata, "ml_use_column_spec_id", column_spec_id ) - request = {"name": dataset.name, "tables_dataset_metadata": metadata} + request = google.cloud.automl_v1beta1.UpdateDatasetRequest( + dataset={"name": dataset.name, "tables_dataset_metadata": metadata} + ) - return self.auto_ml_client.update_dataset(request, **kwargs) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.update_dataset(request=request, **method_kwargs) def clear_test_train_column( self, + *, dataset=None, dataset_display_name=None, dataset_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Clears the test/train (ml_use) column which designates which data belongs to the test and train sets. @@ -2013,16 +2081,18 @@ def clear_test_train_column( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) metadata = dataset.tables_dataset_metadata metadata = self.__update_metadata(metadata, "ml_use_column_spec_id", None) - request = {"name": dataset.name, "tables_dataset_metadata": metadata} + request = google.cloud.automl_v1beta1.UpdateDatasetRequest( + dataset={"name": dataset.name, "tables_dataset_metadata": metadata} + ) - return self.auto_ml_client.update_dataset(request, **kwargs) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.update_dataset(request=request, **method_kwargs) - def list_models(self, project=None, region=None, **kwargs): + def list_models(self, *, project=None, region=None, **kwargs): """List all models in a particular project and region. Example: @@ -2064,18 +2134,23 @@ def list_models(self, project=None, region=None, **kwargs): to a retryable error and retry attempts failed. ValueError: If required parameters are missing. 
""" - return self.auto_ml_client.list_models( - self.__location_path(project=project, region=region), **kwargs + + request = google.cloud.automl_v1beta1.ListModelsRequest( + parent=self.__location_path(project=project, region=region), ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.list_models(request=request, **method_kwargs) + def list_model_evaluations( self, + *, project=None, region=None, model=None, model_display_name=None, model_name=None, - **kwargs + **kwargs, ): """List all model evaluations for a given model. @@ -2143,14 +2218,20 @@ def list_model_evaluations( model_display_name=model_display_name, project=project, region=region, - **kwargs ) - return self.auto_ml_client.list_model_evaluations(model_name, **kwargs) + request = google.cloud.automl_v1beta1.ListModelEvaluationsRequest( + parent=model_name, + ) + method_kwargs = self.__process_request_kwargs(request, **kwargs) + return self.auto_ml_client.list_model_evaluations( + request=request, **method_kwargs + ) def create_model( self, model_display_name, + *, dataset=None, dataset_display_name=None, dataset_name=None, @@ -2162,7 +2243,7 @@ def create_model( include_column_spec_names=None, exclude_column_spec_names=None, disable_early_stopping=False, - **kwargs + **kwargs, ): """Create a model. This will train your model on the given dataset. @@ -2266,7 +2347,6 @@ def create_model( dataset_display_name=dataset_display_name, project=project, region=region, - **kwargs ) model_metadata["train_budget_milli_node_hours"] = train_budget_milli_node_hours @@ -2282,7 +2362,6 @@ def create_model( dataset=dataset, dataset_name=dataset_name, dataset_display_name=dataset_display_name, - **kwargs ) ] @@ -2300,26 +2379,31 @@ def create_model( model_metadata["input_feature_column_specs"] = final_columns - request = { - "display_name": model_display_name, - "dataset_id": dataset_id, - "tables_model_metadata": model_metadata, - } - - op = self.auto_ml_client.create_model( - self.__location_path(project=project, region=region), request, **kwargs + req = google.cloud.automl_v1beta1.CreateModelRequest( + parent=self.__location_path(project=project, region=region), + model=google.cloud.automl_v1beta1.Model( + display_name=model_display_name, + dataset_id=dataset_id, + tables_model_metadata=google.cloud.automl_v1beta1.TablesModelMetadata( + model_metadata + ), + ), ) + + method_kwargs = self.__process_request_kwargs(req, **kwargs) + op = self.auto_ml_client.create_model(request=req, **method_kwargs) self.__log_operation_info("Model creation", op) return op def delete_model( self, + *, model=None, model_display_name=None, model_name=None, project=None, region=None, - **kwargs + **kwargs, ): """Deletes a model. Note this will not delete any datasets associated with this model. 
@@ -2379,18 +2463,19 @@
                 model_display_name=model_display_name,
                 project=project,
                 region=region,
-                **kwargs
             )
         # delete is idempotent
         except exceptions.NotFound:
             return None
 
-        op = self.auto_ml_client.delete_model(model_name, **kwargs)
+        request = google.cloud.automl_v1beta1.DeleteModelRequest(name=model_name)
+        method_kwargs = self.__process_request_kwargs(request, **kwargs)
+        op = self.auto_ml_client.delete_model(request=request, **method_kwargs)
         self.__log_operation_info("Delete model", op)
         return op
 
     def get_model_evaluation(
-        self, model_evaluation_name, project=None, region=None, **kwargs
+        self, model_evaluation_name, *, project=None, region=None, **kwargs
     ):
         """Gets a single model evaluation in a particular project and region.
 
@@ -2429,15 +2514,22 @@
             to a retryable error and retry attempts failed.
             ValueError: If required parameters are missing.
         """
-        return self.auto_ml_client.get_model_evaluation(model_evaluation_name, **kwargs)
+        request = google.cloud.automl_v1beta1.GetModelEvaluationRequest(
+            name=model_evaluation_name
+        )
+        method_kwargs = self.__process_request_kwargs(request, **kwargs)
+        return self.auto_ml_client.get_model_evaluation(
+            request=request, **method_kwargs
+        )
 
     def get_model(
         self,
+        *,
         project=None,
         region=None,
         model_name=None,
         model_display_name=None,
-        **kwargs
+        **kwargs,
     ):
         """Gets a single model in a particular project and region.
 
@@ -2489,21 +2581,24 @@
         )
 
         if model_name is not None:
-            return self.auto_ml_client.get_model(model_name, **kwargs)
+            return self.auto_ml_client.get_model(name=model_name)
 
         return self.__lookup_by_display_name(
-            "model", self.list_models(project, region, **kwargs), model_display_name
+            "model",
+            self.list_models(project=project, region=region),
+            model_display_name,
         )
 
     # TODO(jonathanskim): allow deployment from just model ID
     def deploy_model(
         self,
+        *,
         model=None,
         model_name=None,
         model_display_name=None,
         project=None,
         region=None,
-        **kwargs
+        **kwargs,
     ):
         """Deploys a model. This allows you to make online predictions using
         the model you've deployed.
 
@@ -2562,21 +2657,24 @@
             model_display_name=model_display_name,
             project=project,
             region=region,
-            **kwargs
         )
 
-        op = self.auto_ml_client.deploy_model(model_name, **kwargs)
+        request = google.cloud.automl_v1beta1.DeployModelRequest(name=model_name)
+
+        method_kwargs = self.__process_request_kwargs(request, **kwargs)
+        op = self.auto_ml_client.deploy_model(request=request, **method_kwargs)
         self.__log_operation_info("Deploy model", op)
         return op
 
     def undeploy_model(
         self,
+        *,
         model=None,
         model_name=None,
         model_display_name=None,
         project=None,
         region=None,
-        **kwargs
+        **kwargs,
     ):
         """Undeploys a model.
 
@@ -2634,10 +2732,11 @@
             model_display_name=model_display_name,
             project=project,
             region=region,
-            **kwargs
         )
 
-        op = self.auto_ml_client.undeploy_model(model_name, **kwargs)
+        request = google.cloud.automl_v1beta1.UndeployModelRequest(name=model_name)
+        method_kwargs = self.__process_request_kwargs(request, **kwargs)
+        op = self.auto_ml_client.undeploy_model(request=request, **method_kwargs)
         self.__log_operation_info("Undeploy model", op)
         return op
 
@@ -2645,13 +2744,14 @@
     def predict(
         self,
         inputs,
+        *,
         model=None,
         model_name=None,
         model_display_name=None,
         feature_importance=False,
         project=None,
         region=None,
-        **kwargs
+        **kwargs,
     ):
         """Makes a prediction on a deployed model. This will fail if the model
         was not deployed.
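The deployment above can also be expressed directly with the request object the wrapper now builds internally; a sketch with a placeholder model name:

```py
from google.cloud import automl_v1beta1

auto_ml = automl_v1beta1.AutoMlClient()
model_name = "projects/my-project/locations/us-central1/models/TBL456"  # placeholder

op = auto_ml.deploy_model(
    request=automl_v1beta1.DeployModelRequest(name=model_name)
)
op.result()  # wait for the deploy operation to finish
```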
@@ -2716,7 +2816,6 @@
             model_display_name=model_display_name,
             project=project,
             region=region,
-            **kwargs
         )
 
         column_specs = model.tables_model_metadata.input_feature_column_specs
@@ -2739,17 +2838,28 @@
                 raise ValueError(err)
             values.append(value_type)
 
-        row = data_items_pb2.Row(values=values)
-        payload = data_items_pb2.ExamplePayload(row=row)
+        row = data_items.Row()
+
+        # append each row value separately until this issue is resolved
+        # https://github.com/googleapis/proto-plus-python/issues/104
+        for v in values:
+            row.values.append(v)
+
+        payload = data_items.ExamplePayload(row=row)
 
         params = None
         if feature_importance:
             params = {"feature_importance": "true"}
 
-        return self.prediction_client.predict(model.name, payload, params, **kwargs)
+        request = google.cloud.automl_v1beta1.PredictRequest(
+            name=model.name, payload=payload, params=params,
+        )
+        method_kwargs = self.__process_request_kwargs(request, **kwargs)
+        return self.prediction_client.predict(request=request, **method_kwargs)
 
     def batch_predict(
         self,
+        *,
         pandas_dataframe=None,
         bigquery_input_uri=None,
         bigquery_output_uri=None,
@@ -2763,7 +2873,7 @@
         credentials=None,
         inputs=None,
         params={},
-        **kwargs
+        **kwargs,
     ):
         """Makes a batch prediction on a model. This does _not_ require the
         model to be deployed.
@@ -2851,7 +2961,6 @@
             model_display_name=model_display_name,
             project=project,
             region=region,
-            **kwargs
         )
 
         input_request = None
@@ -2889,8 +2998,11 @@
                 "One of 'gcs_output_uri_prefix'/'bigquery_output_uri' must be set"
             )
 
-        op = self.prediction_client.batch_predict(
-            model_name, input_request, output_request, params, **kwargs
+        req = google.cloud.automl_v1beta1.BatchPredictRequest(
+            name=model_name, input_config=input_request, output_config=output_request, params=params,
         )
+
+        method_kwargs = self.__process_request_kwargs(req, **kwargs)
+        op = self.prediction_client.batch_predict(request=req, **method_kwargs)
         self.__log_operation_info("Batch predict", op)
         return op
diff --git a/google/cloud/automl_v1beta1/tables/__init__.py b/google/cloud/automl_v1beta1/tables/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/google/cloud/automl_v1beta1/types.py b/google/cloud/automl_v1beta1/types.py
deleted file mode 100644
index 2e9bdd77..00000000
--- a/google/cloud/automl_v1beta1/types.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.automl_v1beta1.proto import annotation_payload_pb2 -from google.cloud.automl_v1beta1.proto import annotation_spec_pb2 -from google.cloud.automl_v1beta1.proto import classification_pb2 -from google.cloud.automl_v1beta1.proto import column_spec_pb2 -from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.proto import data_stats_pb2 -from google.cloud.automl_v1beta1.proto import data_types_pb2 -from google.cloud.automl_v1beta1.proto import dataset_pb2 -from google.cloud.automl_v1beta1.proto import detection_pb2 -from google.cloud.automl_v1beta1.proto import geometry_pb2 -from google.cloud.automl_v1beta1.proto import image_pb2 -from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 -from google.cloud.automl_v1beta1.proto import model_pb2 -from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.automl_v1beta1.proto import prediction_service_pb2 -from google.cloud.automl_v1beta1.proto import ranges_pb2 -from google.cloud.automl_v1beta1.proto import regression_pb2 -from google.cloud.automl_v1beta1.proto import service_pb2 -from google.cloud.automl_v1beta1.proto import table_spec_pb2 -from google.cloud.automl_v1beta1.proto import tables_pb2 -from google.cloud.automl_v1beta1.proto import temporal_pb2 -from google.cloud.automl_v1beta1.proto import text_extraction_pb2 -from google.cloud.automl_v1beta1.proto import text_pb2 -from google.cloud.automl_v1beta1.proto import text_segment_pb2 -from google.cloud.automl_v1beta1.proto import text_sentiment_pb2 -from google.cloud.automl_v1beta1.proto import translation_pb2 -from google.cloud.automl_v1beta1.proto import video_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - longrunning_operations_pb2, - any_pb2, - duration_pb2, - field_mask_pb2, - struct_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - annotation_payload_pb2, - annotation_spec_pb2, - classification_pb2, - column_spec_pb2, - data_items_pb2, - data_stats_pb2, - data_types_pb2, - dataset_pb2, - detection_pb2, - geometry_pb2, - image_pb2, - io_pb2, - model_evaluation_pb2, - model_pb2, - proto_operations_pb2, - prediction_service_pb2, - ranges_pb2, - regression_pb2, - service_pb2, - table_spec_pb2, - tables_pb2, - temporal_pb2, - text_extraction_pb2, - text_pb2, - text_segment_pb2, - text_sentiment_pb2, - translation_pb2, - video_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.automl_v1beta1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/automl_v1beta1/types/__init__.py b/google/cloud/automl_v1beta1/types/__init__.py new file mode 100644 index 00000000..679be5f4 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/__init__.py @@ -0,0 +1,295 @@ +# -*- 
coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .temporal import TimeSegment +from .classification import ( + ClassificationAnnotation, + VideoClassificationAnnotation, + ClassificationEvaluationMetrics, +) +from .geometry import ( + NormalizedVertex, + BoundingPoly, +) +from .detection import ( + ImageObjectDetectionAnnotation, + VideoObjectTrackingAnnotation, + BoundingBoxMetricsEntry, + ImageObjectDetectionEvaluationMetrics, + VideoObjectTrackingEvaluationMetrics, +) +from .data_stats import ( + DataStats, + Float64Stats, + StringStats, + TimestampStats, + ArrayStats, + StructStats, + CategoryStats, + CorrelationStats, +) +from .data_types import ( + DataType, + StructType, +) +from .column_spec import ColumnSpec +from .io import ( + InputConfig, + BatchPredictInputConfig, + DocumentInputConfig, + OutputConfig, + BatchPredictOutputConfig, + ModelExportOutputConfig, + ExportEvaluatedExamplesOutputConfig, + GcsSource, + BigQuerySource, + GcsDestination, + BigQueryDestination, + GcrDestination, +) +from .text_segment import TextSegment +from .data_items import ( + Image, + TextSnippet, + DocumentDimensions, + Document, + Row, + ExamplePayload, +) +from .ranges import DoubleRange +from .regression import RegressionEvaluationMetrics +from .tables import ( + TablesDatasetMetadata, + TablesModelMetadata, + TablesAnnotation, + TablesModelColumnInfo, +) +from .text_extraction import ( + TextExtractionAnnotation, + TextExtractionEvaluationMetrics, +) +from .text_sentiment import ( + TextSentimentAnnotation, + TextSentimentEvaluationMetrics, +) +from .translation import ( + TranslationDatasetMetadata, + TranslationEvaluationMetrics, + TranslationModelMetadata, + TranslationAnnotation, +) +from .annotation_payload import AnnotationPayload +from .annotation_spec import AnnotationSpec +from .image import ( + ImageClassificationDatasetMetadata, + ImageObjectDetectionDatasetMetadata, + ImageClassificationModelMetadata, + ImageObjectDetectionModelMetadata, + ImageClassificationModelDeploymentMetadata, + ImageObjectDetectionModelDeploymentMetadata, +) +from .text import ( + TextClassificationDatasetMetadata, + TextClassificationModelMetadata, + TextExtractionDatasetMetadata, + TextExtractionModelMetadata, + TextSentimentDatasetMetadata, + TextSentimentModelMetadata, +) +from .video import ( + VideoClassificationDatasetMetadata, + VideoObjectTrackingDatasetMetadata, + VideoClassificationModelMetadata, + VideoObjectTrackingModelMetadata, +) +from .dataset import Dataset +from .model import Model +from .model_evaluation import ModelEvaluation +from .operations import ( + OperationMetadata, + DeleteOperationMetadata, + DeployModelOperationMetadata, + UndeployModelOperationMetadata, + CreateModelOperationMetadata, + ImportDataOperationMetadata, + ExportDataOperationMetadata, + BatchPredictOperationMetadata, + ExportModelOperationMetadata, + ExportEvaluatedExamplesOperationMetadata, +) +from .prediction_service import ( + PredictRequest, + 
PredictResponse, + BatchPredictRequest, + BatchPredictResult, +) +from .table_spec import TableSpec +from .service import ( + CreateDatasetRequest, + GetDatasetRequest, + ListDatasetsRequest, + ListDatasetsResponse, + UpdateDatasetRequest, + DeleteDatasetRequest, + ImportDataRequest, + ExportDataRequest, + GetAnnotationSpecRequest, + GetTableSpecRequest, + ListTableSpecsRequest, + ListTableSpecsResponse, + UpdateTableSpecRequest, + GetColumnSpecRequest, + ListColumnSpecsRequest, + ListColumnSpecsResponse, + UpdateColumnSpecRequest, + CreateModelRequest, + GetModelRequest, + ListModelsRequest, + ListModelsResponse, + DeleteModelRequest, + DeployModelRequest, + UndeployModelRequest, + ExportModelRequest, + ExportEvaluatedExamplesRequest, + GetModelEvaluationRequest, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, +) + + +__all__ = ( + "TimeSegment", + "ClassificationAnnotation", + "VideoClassificationAnnotation", + "ClassificationEvaluationMetrics", + "NormalizedVertex", + "BoundingPoly", + "ImageObjectDetectionAnnotation", + "VideoObjectTrackingAnnotation", + "BoundingBoxMetricsEntry", + "ImageObjectDetectionEvaluationMetrics", + "VideoObjectTrackingEvaluationMetrics", + "DataStats", + "Float64Stats", + "StringStats", + "TimestampStats", + "ArrayStats", + "StructStats", + "CategoryStats", + "CorrelationStats", + "DataType", + "StructType", + "ColumnSpec", + "InputConfig", + "BatchPredictInputConfig", + "DocumentInputConfig", + "OutputConfig", + "BatchPredictOutputConfig", + "ModelExportOutputConfig", + "ExportEvaluatedExamplesOutputConfig", + "GcsSource", + "BigQuerySource", + "GcsDestination", + "BigQueryDestination", + "GcrDestination", + "TextSegment", + "Image", + "TextSnippet", + "DocumentDimensions", + "Document", + "Row", + "ExamplePayload", + "DoubleRange", + "RegressionEvaluationMetrics", + "TablesDatasetMetadata", + "TablesModelMetadata", + "TablesAnnotation", + "TablesModelColumnInfo", + "TextExtractionAnnotation", + "TextExtractionEvaluationMetrics", + "TextSentimentAnnotation", + "TextSentimentEvaluationMetrics", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", + "TranslationAnnotation", + "AnnotationPayload", + "AnnotationSpec", + "ImageClassificationDatasetMetadata", + "ImageObjectDetectionDatasetMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionModelMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageObjectDetectionModelDeploymentMetadata", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionDatasetMetadata", + "TextExtractionModelMetadata", + "TextSentimentDatasetMetadata", + "TextSentimentModelMetadata", + "VideoClassificationDatasetMetadata", + "VideoObjectTrackingDatasetMetadata", + "VideoClassificationModelMetadata", + "VideoObjectTrackingModelMetadata", + "Dataset", + "Model", + "ModelEvaluation", + "OperationMetadata", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", + "UndeployModelOperationMetadata", + "CreateModelOperationMetadata", + "ImportDataOperationMetadata", + "ExportDataOperationMetadata", + "BatchPredictOperationMetadata", + "ExportModelOperationMetadata", + "ExportEvaluatedExamplesOperationMetadata", + "PredictRequest", + "PredictResponse", + "BatchPredictRequest", + "BatchPredictResult", + "TableSpec", + "CreateDatasetRequest", + "GetDatasetRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", + "UpdateDatasetRequest", + "DeleteDatasetRequest", + "ImportDataRequest", + "ExportDataRequest", + 
"GetAnnotationSpecRequest", + "GetTableSpecRequest", + "ListTableSpecsRequest", + "ListTableSpecsResponse", + "UpdateTableSpecRequest", + "GetColumnSpecRequest", + "ListColumnSpecsRequest", + "ListColumnSpecsResponse", + "UpdateColumnSpecRequest", + "CreateModelRequest", + "GetModelRequest", + "ListModelsRequest", + "ListModelsResponse", + "DeleteModelRequest", + "DeployModelRequest", + "UndeployModelRequest", + "ExportModelRequest", + "ExportEvaluatedExamplesRequest", + "GetModelEvaluationRequest", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", +) diff --git a/google/cloud/automl_v1beta1/types/annotation_payload.py b/google/cloud/automl_v1beta1/types/annotation_payload.py new file mode 100644 index 00000000..5e5d09cd --- /dev/null +++ b/google/cloud/automl_v1beta1/types/annotation_payload.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import classification as gca_classification +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import tables as gca_tables +from google.cloud.automl_v1beta1.types import text_extraction as gca_text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment as gca_text_sentiment +from google.cloud.automl_v1beta1.types import translation as gca_translation + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"AnnotationPayload",}, +) + + +class AnnotationPayload(proto.Message): + r"""Contains annotation information that is relevant to AutoML. + + Attributes: + translation (~.gca_translation.TranslationAnnotation): + Annotation details for translation. + classification (~.gca_classification.ClassificationAnnotation): + Annotation details for content or image + classification. + image_object_detection (~.detection.ImageObjectDetectionAnnotation): + Annotation details for image object + detection. + video_classification (~.gca_classification.VideoClassificationAnnotation): + Annotation details for video classification. + Returned for Video Classification predictions. + video_object_tracking (~.detection.VideoObjectTrackingAnnotation): + Annotation details for video object tracking. + text_extraction (~.gca_text_extraction.TextExtractionAnnotation): + Annotation details for text extraction. + text_sentiment (~.gca_text_sentiment.TextSentimentAnnotation): + Annotation details for text sentiment. + tables (~.gca_tables.TablesAnnotation): + Annotation details for Tables. + annotation_spec_id (str): + Output only . The resource ID of the + annotation spec that this annotation pertains + to. The annotation spec comes from either an + ancestor dataset, or the dataset that was used + to train the model in use. + display_name (str): + Output only. The value of + [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] + when the model was trained. 
Because this field returns a + value at model training time, for different models trained + using the same dataset, the returned value could be + different as model owner could update the ``display_name`` + between any two model training. + """ + + translation = proto.Field( + proto.MESSAGE, + number=2, + oneof="detail", + message=gca_translation.TranslationAnnotation, + ) + + classification = proto.Field( + proto.MESSAGE, + number=3, + oneof="detail", + message=gca_classification.ClassificationAnnotation, + ) + + image_object_detection = proto.Field( + proto.MESSAGE, + number=4, + oneof="detail", + message=detection.ImageObjectDetectionAnnotation, + ) + + video_classification = proto.Field( + proto.MESSAGE, + number=9, + oneof="detail", + message=gca_classification.VideoClassificationAnnotation, + ) + + video_object_tracking = proto.Field( + proto.MESSAGE, + number=8, + oneof="detail", + message=detection.VideoObjectTrackingAnnotation, + ) + + text_extraction = proto.Field( + proto.MESSAGE, + number=6, + oneof="detail", + message=gca_text_extraction.TextExtractionAnnotation, + ) + + text_sentiment = proto.Field( + proto.MESSAGE, + number=7, + oneof="detail", + message=gca_text_sentiment.TextSentimentAnnotation, + ) + + tables = proto.Field( + proto.MESSAGE, number=10, oneof="detail", message=gca_tables.TablesAnnotation, + ) + + annotation_spec_id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/annotation_spec.py b/google/cloud/automl_v1beta1/types/annotation_spec.py new file mode 100644 index 00000000..bb810acd --- /dev/null +++ b/google/cloud/automl_v1beta1/types/annotation_spec.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"AnnotationSpec",}, +) + + +class AnnotationSpec(proto.Message): + r"""A definition of an annotation spec. + + Attributes: + name (str): + Output only. Resource name of the annotation spec. Form: + + 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' + display_name (str): + Required. The name of the annotation spec to show in the + interface. The name can be up to 32 characters long and must + match the regexp ``[a-zA-Z0-9_]+``. + example_count (int): + Output only. The number of examples in the + parent dataset labeled by the annotation spec. 
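Each of these generated types is a proto-plus message that can be constructed with plain keyword arguments, and the `types` package shown above re-exports them. A minimal sketch, assuming the library is installed; the score, spec ID, and display name are placeholder values:

```py
from google.cloud.automl_v1beta1 import types

# The "detail" oneof admits one field at a time; setting
# `classification` here leaves `translation` etc. unset.
payload = types.AnnotationPayload(
    classification=types.ClassificationAnnotation(score=0.5),
    annotation_spec_id="123",
    display_name="daisy",
)
print(payload.classification.score)
```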
+ """ + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + example_count = proto.Field(proto.INT32, number=9) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/classification.py b/google/cloud/automl_v1beta1/types/classification.py new file mode 100644 index 00000000..8c879bf2 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/classification.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import temporal + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "ClassificationType", + "ClassificationAnnotation", + "VideoClassificationAnnotation", + "ClassificationEvaluationMetrics", + }, +) + + +class ClassificationType(proto.Enum): + r"""Type of the classification problem.""" + CLASSIFICATION_TYPE_UNSPECIFIED = 0 + MULTICLASS = 1 + MULTILABEL = 2 + + +class ClassificationAnnotation(proto.Message): + r"""Contains annotation details specific to classification. + + Attributes: + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence that the annotation is positive. If a + user approves an annotation as negative or + positive, the score value remains unchanged. If + a user creates an annotation, the score is 0 for + negative or 1 for positive. + """ + + score = proto.Field(proto.FLOAT, number=1) + + +class VideoClassificationAnnotation(proto.Message): + r"""Contains annotation details specific to video classification. + + Attributes: + type (str): + Output only. Expresses the type of video classification. + Possible values: + + - ``segment`` - Classification done on a specified by user + time segment of a video. AnnotationSpec is answered to be + present in that time segment, if it is present in any + part of it. The video ML model evaluations are done only + for this type of classification. + + - ``shot``- Shot-level classification. AutoML Video + Intelligence determines the boundaries for each camera + shot in the entire segment of the video that user + specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence + scores for each detected shot, along with the start and + end time of the shot. WARNING: Model evaluation is not + done for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. + + - ``1s_interval`` - AutoML Video Intelligence returns + labels and their confidence scores for each second of the + entire segment of the video that user specified in the + request configuration. WARNING: Model evaluation is not + done for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. 
+ classification_annotation (~.classification.ClassificationAnnotation): + Output only . The classification details of + this annotation. + time_segment (~.temporal.TimeSegment): + Output only . The time segment of the video + to which the annotation applies. + """ + + type = proto.Field(proto.STRING, number=1) + + classification_annotation = proto.Field( + proto.MESSAGE, number=2, message=ClassificationAnnotation, + ) + + time_segment = proto.Field(proto.MESSAGE, number=3, message=temporal.TimeSegment,) + + +class ClassificationEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for classification problems. Note: For + Video Classification this metrics only describe quality of the Video + Classification predictions of "segment_classification" type. + + Attributes: + au_prc (float): + Output only. The Area Under Precision-Recall + Curve metric. Micro-averaged for the overall + evaluation. + base_au_prc (float): + Output only. The Area Under Precision-Recall + Curve metric based on priors. Micro-averaged for + the overall evaluation. Deprecated. + au_roc (float): + Output only. The Area Under Receiver + Operating Characteristic curve metric. Micro- + averaged for the overall evaluation. + log_loss (float): + Output only. The Log Loss metric. + confidence_metrics_entry (Sequence[~.classification.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): + Output only. Metrics for each confidence_threshold in + 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and + position_threshold = INT32_MAX_VALUE. ROC and + precision-recall curves, and other aggregated metrics are + derived from them. The confidence metrics entries may also + be supplied for additional values of position_threshold, but + from these no aggregated metrics are computed. + confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix): + Output only. Confusion matrix of the + evaluation. Only set for MULTICLASS + classification problems where number of labels + is no more than 10. + Only set for model level evaluation, not for + evaluation per label. + annotation_spec_id (Sequence[str]): + Output only. The annotation spec ids used for + this evaluation. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. Metrics are computed with an + assumption that the model never returns + predictions with score lower than this value. + position_threshold (int): + Output only. Metrics are computed with an assumption that + the model always returns at most this many predictions + (ordered by their score, descendingly), but they all still + need to meet the confidence_threshold. + recall (float): + Output only. Recall (True Positive Rate) for + the given confidence threshold. + precision (float): + Output only. Precision for the given + confidence threshold. + false_positive_rate (float): + Output only. False Positive Rate for the + given confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + recall_at1 (float): + Output only. The Recall (True Positive Rate) + when only considering the label that has the + highest prediction score and not below the + confidence threshold for each example. + precision_at1 (float): + Output only. The precision when only + considering the label that has the highest + prediction score and not below the confidence + threshold for each example. + false_positive_rate_at1 (float): + Output only. 
The False Positive Rate when + only considering the label that has the highest + prediction score and not below the confidence + threshold for each example. + f1_score_at1 (float): + Output only. The harmonic mean of + [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] + and + [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. + true_positive_count (int): + Output only. The number of model created + labels that match a ground truth label. + false_positive_count (int): + Output only. The number of model created + labels that do not match a ground truth label. + false_negative_count (int): + Output only. The number of ground truth + labels that are not matched by a model created + label. + true_negative_count (int): + Output only. The number of labels that were + not created by the model, but if they would, + they would not match a ground truth label. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + position_threshold = proto.Field(proto.INT32, number=14) + + recall = proto.Field(proto.FLOAT, number=2) + + precision = proto.Field(proto.FLOAT, number=3) + + false_positive_rate = proto.Field(proto.FLOAT, number=8) + + f1_score = proto.Field(proto.FLOAT, number=4) + + recall_at1 = proto.Field(proto.FLOAT, number=5) + + precision_at1 = proto.Field(proto.FLOAT, number=6) + + false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9) + + f1_score_at1 = proto.Field(proto.FLOAT, number=7) + + true_positive_count = proto.Field(proto.INT64, number=10) + + false_positive_count = proto.Field(proto.INT64, number=11) + + false_negative_count = proto.Field(proto.INT64, number=12) + + true_negative_count = proto.Field(proto.INT64, number=13) + + class ConfusionMatrix(proto.Message): + r"""Confusion matrix of the model running the classification. + + Attributes: + annotation_spec_id (Sequence[str]): + Output only. IDs of the annotation specs used in the + confusion matrix. For Tables CLASSIFICATION + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] + only list of [annotation_spec_display_name-s][] is + populated. + display_name (Sequence[str]): + Output only. Display name of the annotation specs used in + the confusion matrix, as they were at the moment of the + evaluation. For Tables CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], + distinct values of the target column at the moment of the + model evaluation are populated here. + row (Sequence[~.classification.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): + Output only. Rows in the confusion matrix. The number of + rows is equal to the size of ``annotation_spec_id``. + ``row[i].example_count[j]`` is the number of examples that + have ground truth of the ``annotation_spec_id[i]`` and are + predicted as ``annotation_spec_id[j]`` by the model being + evaluated. + """ + + class Row(proto.Message): + r"""Output only. A row in the confusion matrix. + + Attributes: + example_count (Sequence[int]): + Output only. Value of the specific cell in the confusion + matrix. The number of values each row has (i.e. the length + of the row) is equal to the length of the + ``annotation_spec_id`` field or, if that one is not + populated, length of the + [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] + field. 
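Because ``row[i].example_count[j]`` counts examples whose ground truth is ``annotation_spec_id[i]`` but which the model predicted as ``annotation_spec_id[j]``, the matrix can be walked with two nested loops. A hypothetical sketch, assuming ``metrics`` is a populated ``ClassificationEvaluationMetrics``:

```py
matrix = metrics.confusion_matrix
for i, row in enumerate(matrix.row):
    for j, count in enumerate(row.example_count):
        if count:  # only report non-empty cells
            print(
                f"truth={matrix.display_name[i]!r} "
                f"predicted={matrix.display_name[j]!r}: {count}"
            )
```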
+ """ + + example_count = proto.RepeatedField(proto.INT32, number=1) + + annotation_spec_id = proto.RepeatedField(proto.STRING, number=1) + + display_name = proto.RepeatedField(proto.STRING, number=3) + + row = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ClassificationEvaluationMetrics.ConfusionMatrix.Row", + ) + + au_prc = proto.Field(proto.FLOAT, number=1) + + base_au_prc = proto.Field(proto.FLOAT, number=2) + + au_roc = proto.Field(proto.FLOAT, number=6) + + log_loss = proto.Field(proto.FLOAT, number=7) + + confidence_metrics_entry = proto.RepeatedField( + proto.MESSAGE, number=3, message=ConfidenceMetricsEntry, + ) + + confusion_matrix = proto.Field(proto.MESSAGE, number=4, message=ConfusionMatrix,) + + annotation_spec_id = proto.RepeatedField(proto.STRING, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/column_spec.py b/google/cloud/automl_v1beta1/types/column_spec.py new file mode 100644 index 00000000..3aa86c4b --- /dev/null +++ b/google/cloud/automl_v1beta1/types/column_spec.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import data_stats as gca_data_stats +from google.cloud.automl_v1beta1.types import data_types + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"ColumnSpec",}, +) + + +class ColumnSpec(proto.Message): + r"""A representation of a column in a relational table. When listing + them, column specs are returned in the same order in which they were + given on import . Used by: + + - Tables + + Attributes: + name (str): + Output only. The resource name of the column specs. Form: + + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}`` + data_type (~.data_types.DataType): + The data type of elements stored in the + column. + display_name (str): + Output only. The name of the column to show in the + interface. The name can be up to 100 characters long and can + consist only of ASCII Latin letters A-Z and a-z, ASCII + digits 0-9, underscores(_), and forward slashes(/), and must + start with a letter or a digit. + data_stats (~.gca_data_stats.DataStats): + Output only. Stats of the series of values in the column. + This field may be stale, see the ancestor's + Dataset.tables_dataset_metadata.stats_update_time field for + the timestamp at which these stats were last updated. + top_correlated_columns (Sequence[~.column_spec.ColumnSpec.CorrelatedColumn]): + Deprecated. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + """ + + class CorrelatedColumn(proto.Message): + r"""Identifies the table's column, and its correlation with the + column this ColumnSpec describes. 
+ + Attributes: + column_spec_id (str): + The column_spec_id of the correlated column, which belongs + to the same table as the in-context column. + correlation_stats (~.gca_data_stats.CorrelationStats): + Correlation between this and the in-context + column. + """ + + column_spec_id = proto.Field(proto.STRING, number=1) + + correlation_stats = proto.Field( + proto.MESSAGE, number=2, message=gca_data_stats.CorrelationStats, + ) + + name = proto.Field(proto.STRING, number=1) + + data_type = proto.Field(proto.MESSAGE, number=2, message=data_types.DataType,) + + display_name = proto.Field(proto.STRING, number=3) + + data_stats = proto.Field(proto.MESSAGE, number=4, message=gca_data_stats.DataStats,) + + top_correlated_columns = proto.RepeatedField( + proto.MESSAGE, number=5, message=CorrelatedColumn, + ) + + etag = proto.Field(proto.STRING, number=6) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/data_items.py b/google/cloud/automl_v1beta1/types/data_items.py new file mode 100644 index 00000000..4e0037b0 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/data_items.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import geometry +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "Image", + "TextSnippet", + "DocumentDimensions", + "Document", + "Row", + "ExamplePayload", + }, +) + + +class Image(proto.Message): + r"""A representation of an image. + Only images up to 30MB in size are supported. + + Attributes: + image_bytes (bytes): + Image content represented as a stream of bytes. Note: As + with all ``bytes`` fields, protobuffers use a pure binary + representation, whereas JSON representations use base64. + input_config (~.io.InputConfig): + An input config specifying the content of the + image. + thumbnail_uri (str): + Output only. HTTP URI to the thumbnail image. + """ + + image_bytes = proto.Field(proto.BYTES, number=1, oneof="data") + + input_config = proto.Field( + proto.MESSAGE, number=6, oneof="data", message=io.InputConfig, + ) + + thumbnail_uri = proto.Field(proto.STRING, number=4) + + +class TextSnippet(proto.Message): + r"""A representation of a text snippet. + + Attributes: + content (str): + Required. The content of the text snippet as + a string. Up to 250000 characters long. + mime_type (str): + Optional. The format of + [content][google.cloud.automl.v1beta1.TextSnippet.content]. + Currently the only two allowed values are "text/html" and + "text/plain". If left blank, the format is automatically + determined from the type of the uploaded + [content][google.cloud.automl.v1beta1.TextSnippet.content]. + content_uri (str): + Output only. 
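``Image`` keeps its content in a ``data`` oneof, so inline ``image_bytes`` and an ``input_config`` are mutually exclusive. A small sketch with inline bytes; ``photo.jpg`` is a hypothetical local file:

```py
from google.cloud.automl_v1beta1 import types

with open("photo.jpg", "rb") as f:
    image = types.Image(image_bytes=f.read())  # sets the "data" oneof
```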
HTTP URI where you can download + the content. + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + content_uri = proto.Field(proto.STRING, number=4) + + +class DocumentDimensions(proto.Message): + r"""Message that describes dimension of a document. + + Attributes: + unit (~.data_items.DocumentDimensions.DocumentDimensionUnit): + Unit of the dimension. + width (float): + Width value of the document, works together + with the unit. + height (float): + Height value of the document, works together + with the unit. + """ + + class DocumentDimensionUnit(proto.Enum): + r"""Unit of the document dimension.""" + DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 + INCH = 1 + CENTIMETER = 2 + POINT = 3 + + unit = proto.Field(proto.ENUM, number=1, enum=DocumentDimensionUnit,) + + width = proto.Field(proto.FLOAT, number=2) + + height = proto.Field(proto.FLOAT, number=3) + + +class Document(proto.Message): + r"""A structured text document e.g. a PDF. + + Attributes: + input_config (~.io.DocumentInputConfig): + An input config specifying the content of the + document. + document_text (~.data_items.TextSnippet): + The plain text version of this document. + layout (Sequence[~.data_items.Document.Layout]): + Describes the layout of the document. Sorted by + [page_number][]. + document_dimensions (~.data_items.DocumentDimensions): + The dimensions of the page in the document. + page_count (int): + Number of pages in the document. + """ + + class Layout(proto.Message): + r"""Describes the layout information of a + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in the document. + + Attributes: + text_segment (~.gca_text_segment.TextSegment): + Text Segment that represents a segment in + [document_text][google.cloud.automl.v1beta1.Document.document_text]. + page_number (int): + Page number of the + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in the original document, starts from 1. + bounding_poly (~.geometry.BoundingPoly): + The position of the + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in the page. Contains exactly 4 + + [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices] + and they are connected by edges in the order provided, which + will represent a rectangle parallel to the frame. The + [NormalizedVertex-s][google.cloud.automl.v1beta1.NormalizedVertex] + are relative to the page. Coordinates are based on top-left + as point (0,0). + text_segment_type (~.data_items.Document.Layout.TextSegmentType): + The type of the + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in document. + """ + + class TextSegmentType(proto.Enum): + r"""The type of TextSegment in the context of the original + document. 
+ """ + TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 + TOKEN = 1 + PARAGRAPH = 2 + FORM_FIELD = 3 + FORM_FIELD_NAME = 4 + FORM_FIELD_CONTENTS = 5 + TABLE = 6 + TABLE_HEADER = 7 + TABLE_ROW = 8 + TABLE_CELL = 9 + + text_segment = proto.Field( + proto.MESSAGE, number=1, message=gca_text_segment.TextSegment, + ) + + page_number = proto.Field(proto.INT32, number=2) + + bounding_poly = proto.Field( + proto.MESSAGE, number=3, message=geometry.BoundingPoly, + ) + + text_segment_type = proto.Field( + proto.ENUM, number=4, enum="Document.Layout.TextSegmentType", + ) + + input_config = proto.Field(proto.MESSAGE, number=1, message=io.DocumentInputConfig,) + + document_text = proto.Field(proto.MESSAGE, number=2, message=TextSnippet,) + + layout = proto.RepeatedField(proto.MESSAGE, number=3, message=Layout,) + + document_dimensions = proto.Field( + proto.MESSAGE, number=4, message=DocumentDimensions, + ) + + page_count = proto.Field(proto.INT32, number=5) + + +class Row(proto.Message): + r"""A representation of a row in a relational table. + + Attributes: + column_spec_ids (Sequence[str]): + The resource IDs of the column specs describing the columns + of the row. If set must contain, but possibly in a different + order, all input feature + + [column_spec_ids][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + of the Model this row is being passed to. Note: The below + ``values`` field must match order of this field, if this + field is set. + values (Sequence[~.struct.Value]): + Required. The values of the row cells, given in the same + order as the column_spec_ids, or, if not set, then in the + same order as input feature + + [column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + of the Model this row is being passed to. + """ + + column_spec_ids = proto.RepeatedField(proto.STRING, number=2) + + values = proto.RepeatedField(proto.MESSAGE, number=3, message=struct.Value,) + + +class ExamplePayload(proto.Message): + r"""Example data used for training or prediction. + + Attributes: + image (~.data_items.Image): + Example image. + text_snippet (~.data_items.TextSnippet): + Example text. + document (~.data_items.Document): + Example document. + row (~.data_items.Row): + Example relational table row. + """ + + image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message=Image,) + + text_snippet = proto.Field( + proto.MESSAGE, number=2, oneof="payload", message=TextSnippet, + ) + + document = proto.Field(proto.MESSAGE, number=4, oneof="payload", message=Document,) + + row = proto.Field(proto.MESSAGE, number=3, oneof="payload", message=Row,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/data_stats.py b/google/cloud/automl_v1beta1/types/data_stats.py new file mode 100644 index 00000000..4ae40fc8 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/data_stats.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
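``ExamplePayload`` wraps exactly one of these data items in its ``payload`` oneof, which is how content reaches the prediction API. A hedged sketch of a plain-text payload; the snippet text and model name are made up, and the usual ``name``/``payload`` fields of ``PredictRequest`` are assumed:

```py
from google.cloud.automl_v1beta1 import types

example = types.ExamplePayload(
    text_snippet=types.TextSnippet(
        content="A scratchy throat and a mild fever.",
        mime_type="text/plain",
    )
)
request = types.PredictRequest(
    name="projects/my-project/locations/us-central1/models/TCN123",
    payload=example,
)
```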
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "DataStats", + "Float64Stats", + "StringStats", + "TimestampStats", + "ArrayStats", + "StructStats", + "CategoryStats", + "CorrelationStats", + }, +) + + +class DataStats(proto.Message): + r"""The data statistics of a series of values that share the same + DataType. + + Attributes: + float64_stats (~.data_stats.Float64Stats): + The statistics for FLOAT64 DataType. + string_stats (~.data_stats.StringStats): + The statistics for STRING DataType. + timestamp_stats (~.data_stats.TimestampStats): + The statistics for TIMESTAMP DataType. + array_stats (~.data_stats.ArrayStats): + The statistics for ARRAY DataType. + struct_stats (~.data_stats.StructStats): + The statistics for STRUCT DataType. + category_stats (~.data_stats.CategoryStats): + The statistics for CATEGORY DataType. + distinct_value_count (int): + The number of distinct values. + null_value_count (int): + The number of values that are null. + valid_value_count (int): + The number of values that are valid. + """ + + float64_stats = proto.Field( + proto.MESSAGE, number=3, oneof="stats", message="Float64Stats", + ) + + string_stats = proto.Field( + proto.MESSAGE, number=4, oneof="stats", message="StringStats", + ) + + timestamp_stats = proto.Field( + proto.MESSAGE, number=5, oneof="stats", message="TimestampStats", + ) + + array_stats = proto.Field( + proto.MESSAGE, number=6, oneof="stats", message="ArrayStats", + ) + + struct_stats = proto.Field( + proto.MESSAGE, number=7, oneof="stats", message="StructStats", + ) + + category_stats = proto.Field( + proto.MESSAGE, number=8, oneof="stats", message="CategoryStats", + ) + + distinct_value_count = proto.Field(proto.INT64, number=1) + + null_value_count = proto.Field(proto.INT64, number=2) + + valid_value_count = proto.Field(proto.INT64, number=9) + + +class Float64Stats(proto.Message): + r"""The data statistics of a series of FLOAT64 values. + + Attributes: + mean (float): + The mean of the series. + standard_deviation (float): + The standard deviation of the series. + quantiles (Sequence[float]): + Ordered from 0 to k k-quantile values of the data series of + n values. The value at index i is, approximately, the + i*n/k-th smallest value in the series; for i = 0 and i = k + these are, respectively, the min and max values. + histogram_buckets (Sequence[~.data_stats.Float64Stats.HistogramBucket]): + Histogram buckets of the data series. Sorted by the min + value of the bucket, ascendingly, and the number of the + buckets is dynamically generated. The buckets are + non-overlapping and completely cover whole FLOAT64 range + with min of first bucket being ``"-Infinity"``, and max of + the last one being ``"Infinity"``. + """ + + class HistogramBucket(proto.Message): + r"""A bucket of a histogram. + + Attributes: + min (float): + The minimum value of the bucket, inclusive. + max (float): + The maximum value of the bucket, exclusive unless max = + ``"Infinity"``, in which case it's inclusive. + count (int): + The number of data values that are in the + bucket, i.e. are between min and max values. 
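Given the bucket semantics just described (non-overlapping, jointly covering the whole FLOAT64 range, infinite outer bounds), summarizing a histogram takes only a few lines. A hypothetical sketch, assuming ``stats`` is a ``Float64Stats``:

```py
# First and last quantiles are, approximately, the series min and max.
if stats.quantiles:
    print("min:", stats.quantiles[0], "max:", stats.quantiles[-1])

for bucket in stats.histogram_buckets:
    # `max` is exclusive except for the final "Infinity" bucket.
    print(f"[{bucket.min}, {bucket.max}): {bucket.count} values")
```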
+ """ + + min = proto.Field(proto.DOUBLE, number=1) + + max = proto.Field(proto.DOUBLE, number=2) + + count = proto.Field(proto.INT64, number=3) + + mean = proto.Field(proto.DOUBLE, number=1) + + standard_deviation = proto.Field(proto.DOUBLE, number=2) + + quantiles = proto.RepeatedField(proto.DOUBLE, number=3) + + histogram_buckets = proto.RepeatedField( + proto.MESSAGE, number=4, message=HistogramBucket, + ) + + +class StringStats(proto.Message): + r"""The data statistics of a series of STRING values. + + Attributes: + top_unigram_stats (Sequence[~.data_stats.StringStats.UnigramStats]): + The statistics of the top 20 unigrams, ordered by + [count][google.cloud.automl.v1beta1.StringStats.UnigramStats.count]. + """ + + class UnigramStats(proto.Message): + r"""The statistics of a unigram. + + Attributes: + value (str): + The unigram. + count (int): + The number of occurrences of this unigram in + the series. + """ + + value = proto.Field(proto.STRING, number=1) + + count = proto.Field(proto.INT64, number=2) + + top_unigram_stats = proto.RepeatedField( + proto.MESSAGE, number=1, message=UnigramStats, + ) + + +class TimestampStats(proto.Message): + r"""The data statistics of a series of TIMESTAMP values. + + Attributes: + granular_stats (Sequence[~.data_stats.TimestampStats.GranularStatsEntry]): + The string key is the pre-defined granularity. Currently + supported: hour_of_day, day_of_week, month_of_year. + Granularities finer that the granularity of timestamp data + are not populated (e.g. if timestamps are at day + granularity, then hour_of_day is not populated). + """ + + class GranularStats(proto.Message): + r"""Stats split by a defined in context granularity. + + Attributes: + buckets (Sequence[~.data_stats.TimestampStats.GranularStats.BucketsEntry]): + A map from granularity key to example count for that key. + E.g. for hour_of_day ``13`` means 1pm, or for month_of_year + ``5`` means May). + """ + + buckets = proto.MapField(proto.INT32, proto.INT64, number=1) + + granular_stats = proto.MapField( + proto.STRING, proto.MESSAGE, number=1, message=GranularStats, + ) + + +class ArrayStats(proto.Message): + r"""The data statistics of a series of ARRAY values. + + Attributes: + member_stats (~.data_stats.DataStats): + Stats of all the values of all arrays, as if + they were a single long series of data. The type + depends on the element type of the array. + """ + + member_stats = proto.Field(proto.MESSAGE, number=2, message=DataStats,) + + +class StructStats(proto.Message): + r"""The data statistics of a series of STRUCT values. + + Attributes: + field_stats (Sequence[~.data_stats.StructStats.FieldStatsEntry]): + Map from a field name of the struct to data + stats aggregated over series of all data in that + field across all the structs. + """ + + field_stats = proto.MapField( + proto.STRING, proto.MESSAGE, number=1, message=DataStats, + ) + + +class CategoryStats(proto.Message): + r"""The data statistics of a series of CATEGORY values. + + Attributes: + top_category_stats (Sequence[~.data_stats.CategoryStats.SingleCategoryStats]): + The statistics of the top 20 CATEGORY values, ordered by + + [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count]. + """ + + class SingleCategoryStats(proto.Message): + r"""The statistics of a single CATEGORY value. + + Attributes: + value (str): + The CATEGORY value. + count (int): + The number of occurrences of this value in + the series. 
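``granular_stats`` is a map keyed by granularity name, and each ``GranularStats`` holds its own bucket map, so the nested maps read like ordinary dicts in proto-plus (an assumption worth noting: map fields implement the ``Mapping`` interface). A sketch, with ``ts_stats`` a hypothetical ``TimestampStats``:

```py
day_of_week = ts_stats.granular_stats.get("day_of_week")
if day_of_week is not None:
    for key, count in sorted(day_of_week.buckets.items()):
        print(f"day {key}: {count} examples")
```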
+ """ + + value = proto.Field(proto.STRING, number=1) + + count = proto.Field(proto.INT64, number=2) + + top_category_stats = proto.RepeatedField( + proto.MESSAGE, number=1, message=SingleCategoryStats, + ) + + +class CorrelationStats(proto.Message): + r"""A correlation statistics between two series of DataType + values. The series may have differing DataType-s, but within a + single series the DataType must be the same. + + Attributes: + cramers_v (float): + The correlation value using the Cramer's V + measure. + """ + + cramers_v = proto.Field(proto.DOUBLE, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/data_types.py b/google/cloud/automl_v1beta1/types/data_types.py new file mode 100644 index 00000000..e2a3152e --- /dev/null +++ b/google/cloud/automl_v1beta1/types/data_types.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={"TypeCode", "DataType", "StructType",}, +) + + +class TypeCode(proto.Enum): + r"""``TypeCode`` is used as a part of + [DataType][google.cloud.automl.v1beta1.DataType]. + """ + TYPE_CODE_UNSPECIFIED = 0 + FLOAT64 = 3 + TIMESTAMP = 4 + STRING = 6 + ARRAY = 8 + STRUCT = 9 + CATEGORY = 10 + + +class DataType(proto.Message): + r"""Indicated the type of data that can be stored in a structured + data entity (e.g. a table). + + Attributes: + list_element_type (~.data_types.DataType): + If + [type_code][google.cloud.automl.v1beta1.DataType.type_code] + == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then + ``list_element_type`` is the type of the elements. + struct_type (~.data_types.StructType): + If + [type_code][google.cloud.automl.v1beta1.DataType.type_code] + == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], + then ``struct_type`` provides type information for the + struct's fields. + time_format (str): + If + [type_code][google.cloud.automl.v1beta1.DataType.type_code] + == + [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] + then ``time_format`` provides the format in which that time + field is expressed. The time_format must either be one of: + + - ``UNIX_SECONDS`` + - ``UNIX_MILLISECONDS`` + - ``UNIX_MICROSECONDS`` + - ``UNIX_NANOSECONDS`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format as + described on the type_code is used. + type_code (~.data_types.TypeCode): + Required. The + [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this + type. + nullable (bool): + If true, this DataType can also be ``NULL``. In .CSV files + ``NULL`` value is expressed as an empty string. 
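``DataType`` keys its ``details`` oneof off ``type_code``: an ARRAY carries its element type in ``list_element_type``, a STRUCT its field types in ``struct_type``. Note that ``TypeCode`` is not re-exported by the ``types/__init__.py`` above, so this sketch imports it from its own module. Describing a nullable array of floats:

```py
from google.cloud.automl_v1beta1.types import data_types

float_array = data_types.DataType(
    type_code=data_types.TypeCode.ARRAY,
    list_element_type=data_types.DataType(
        type_code=data_types.TypeCode.FLOAT64
    ),
    nullable=True,
)
```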
+ """ + + list_element_type = proto.Field( + proto.MESSAGE, number=2, oneof="details", message="DataType", + ) + + struct_type = proto.Field( + proto.MESSAGE, number=3, oneof="details", message="StructType", + ) + + time_format = proto.Field(proto.STRING, number=5, oneof="details") + + type_code = proto.Field(proto.ENUM, number=1, enum="TypeCode",) + + nullable = proto.Field(proto.BOOL, number=4) + + +class StructType(proto.Message): + r"""``StructType`` defines the DataType-s of a + [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. + + Attributes: + fields (Sequence[~.data_types.StructType.FieldsEntry]): + Unordered map of struct field names to their + data types. Fields cannot be added or removed + via Update. Their names and data types are still + mutable. + """ + + fields = proto.MapField(proto.STRING, proto.MESSAGE, number=1, message=DataType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/dataset.py b/google/cloud/automl_v1beta1/types/dataset.py new file mode 100644 index 00000000..ef31c859 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/dataset.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"Dataset",}, +) + + +class Dataset(proto.Message): + r"""A workspace for solving a single, particular machine learning + (ML) problem. A workspace contains examples that may be + annotated. + + Attributes: + translation_dataset_metadata (~.translation.TranslationDatasetMetadata): + Metadata for a dataset used for translation. + image_classification_dataset_metadata (~.image.ImageClassificationDatasetMetadata): + Metadata for a dataset used for image + classification. + text_classification_dataset_metadata (~.text.TextClassificationDatasetMetadata): + Metadata for a dataset used for text + classification. + image_object_detection_dataset_metadata (~.image.ImageObjectDetectionDatasetMetadata): + Metadata for a dataset used for image object + detection. + video_classification_dataset_metadata (~.video.VideoClassificationDatasetMetadata): + Metadata for a dataset used for video + classification. + video_object_tracking_dataset_metadata (~.video.VideoObjectTrackingDatasetMetadata): + Metadata for a dataset used for video object + tracking. + text_extraction_dataset_metadata (~.text.TextExtractionDatasetMetadata): + Metadata for a dataset used for text + extraction. + text_sentiment_dataset_metadata (~.text.TextSentimentDatasetMetadata): + Metadata for a dataset used for text + sentiment. 
+ tables_dataset_metadata (~.tables.TablesDatasetMetadata): + Metadata for a dataset used for Tables. + name (str): + Output only. The resource name of the dataset. Form: + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`` + display_name (str): + Required. The name of the dataset to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. + description (str): + User-provided description of the dataset. The + description can be up to 25000 characters long. + example_count (int): + Output only. The number of examples in the + dataset. + create_time (~.timestamp.Timestamp): + Output only. Timestamp when this dataset was + created. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + """ + + translation_dataset_metadata = proto.Field( + proto.MESSAGE, + number=23, + oneof="dataset_metadata", + message=translation.TranslationDatasetMetadata, + ) + + image_classification_dataset_metadata = proto.Field( + proto.MESSAGE, + number=24, + oneof="dataset_metadata", + message=image.ImageClassificationDatasetMetadata, + ) + + text_classification_dataset_metadata = proto.Field( + proto.MESSAGE, + number=25, + oneof="dataset_metadata", + message=text.TextClassificationDatasetMetadata, + ) + + image_object_detection_dataset_metadata = proto.Field( + proto.MESSAGE, + number=26, + oneof="dataset_metadata", + message=image.ImageObjectDetectionDatasetMetadata, + ) + + video_classification_dataset_metadata = proto.Field( + proto.MESSAGE, + number=31, + oneof="dataset_metadata", + message=video.VideoClassificationDatasetMetadata, + ) + + video_object_tracking_dataset_metadata = proto.Field( + proto.MESSAGE, + number=29, + oneof="dataset_metadata", + message=video.VideoObjectTrackingDatasetMetadata, + ) + + text_extraction_dataset_metadata = proto.Field( + proto.MESSAGE, + number=28, + oneof="dataset_metadata", + message=text.TextExtractionDatasetMetadata, + ) + + text_sentiment_dataset_metadata = proto.Field( + proto.MESSAGE, + number=30, + oneof="dataset_metadata", + message=text.TextSentimentDatasetMetadata, + ) + + tables_dataset_metadata = proto.Field( + proto.MESSAGE, + number=33, + oneof="dataset_metadata", + message=tables.TablesDatasetMetadata, + ) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + example_count = proto.Field(proto.INT32, number=21) + + create_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,) + + etag = proto.Field(proto.STRING, number=17) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/detection.py b/google/cloud/automl_v1beta1/types/detection.py new file mode 100644 index 00000000..3f3339ab --- /dev/null +++ b/google/cloud/automl_v1beta1/types/detection.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
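A ``Dataset`` is defined by setting exactly one field of the ``dataset_metadata`` oneof. A hedged sketch of creating a text-classification dataset: the project path is a placeholder, the flattened ``parent``/``dataset`` keywords are assumed to be in ``create_dataset``'s method signature, and the top-level package is assumed to re-export the client and these types as in released 2.x versions (``ClassificationType``, like ``TypeCode``, is not in the ``types/__init__.py`` above, so it is imported from its module):

```py
from google.cloud import automl_v1beta1 as automl
from google.cloud.automl_v1beta1.types import classification

client = automl.AutoMlClient()
dataset = automl.Dataset(
    display_name="my_text_dataset",
    text_classification_dataset_metadata=automl.TextClassificationDatasetMetadata(
        classification_type=classification.ClassificationType.MULTICLASS,
    ),
)
created = client.create_dataset(
    parent="projects/my-project/locations/us-central1",
    dataset=dataset,
)
```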
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import geometry +from google.protobuf import duration_pb2 as duration # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "ImageObjectDetectionAnnotation", + "VideoObjectTrackingAnnotation", + "BoundingBoxMetricsEntry", + "ImageObjectDetectionEvaluationMetrics", + "VideoObjectTrackingEvaluationMetrics", + }, +) + + +class ImageObjectDetectionAnnotation(proto.Message): + r"""Annotation details for image object detection. + + Attributes: + bounding_box (~.geometry.BoundingPoly): + Output only. The rectangle representing the + object location. + score (float): + Output only. The confidence that this annotation is positive + for the parent example, value in [0, 1], higher means higher + positivity confidence. + """ + + bounding_box = proto.Field(proto.MESSAGE, number=1, message=geometry.BoundingPoly,) + + score = proto.Field(proto.FLOAT, number=2) + + +class VideoObjectTrackingAnnotation(proto.Message): + r"""Annotation details for video object tracking. + + Attributes: + instance_id (str): + Optional. The instance of the object, + expressed as a positive integer. Used to tell + apart objects of the same type (i.e. + AnnotationSpec) when multiple are present on a + single example. + NOTE: Instance ID prediction quality is not a + part of model evaluation and is done as best + effort. Especially in cases when an entity goes + off-screen for a longer time (minutes), when it + comes back it may be given a new instance ID. + time_offset (~.duration.Duration): + Required. A time (frame) of a video to which + this annotation pertains. Represented as the + duration since the video's start. + bounding_box (~.geometry.BoundingPoly): + Required. The rectangle representing the object location on + the frame (i.e. at the time_offset of the video). + score (float): + Output only. The confidence that this annotation is positive + for the video at the time_offset, value in [0, 1], higher + means higher positivity confidence. For annotations created + by the user the score is 1. When user approves an + annotation, the original float score is kept (and not + changed to 1). + """ + + instance_id = proto.Field(proto.STRING, number=1) + + time_offset = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + bounding_box = proto.Field(proto.MESSAGE, number=3, message=geometry.BoundingPoly,) + + score = proto.Field(proto.FLOAT, number=4) + + +class BoundingBoxMetricsEntry(proto.Message): + r"""Bounding box matching model metrics for a single + intersection-over-union threshold and multiple label match + confidence thresholds. + + Attributes: + iou_threshold (float): + Output only. The intersection-over-union + threshold value used to compute this metrics + entry. + mean_average_precision (float): + Output only. The mean average precision, most often close to + au_prc. + confidence_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): + Output only. Metrics for each label-match + confidence_threshold from + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall + curve is derived from them. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. 
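``time_offset`` is a protobuf ``Duration``; proto-plus generally also accepts a ``datetime.timedelta`` for duration fields, though that marshaling convenience is an assumption here. A sketch of a hand-built tracking annotation with placeholder coordinates:

```py
import datetime

from google.cloud.automl_v1beta1 import types

annotation = types.VideoObjectTrackingAnnotation(
    instance_id="17",
    # 12.5 seconds into the video; assumed to marshal to a Duration.
    time_offset=datetime.timedelta(seconds=12, milliseconds=500),
    bounding_box=types.BoundingPoly(
        normalized_vertices=[
            types.NormalizedVertex(x=0.1, y=0.2),
            types.NormalizedVertex(x=0.4, y=0.2),
            types.NormalizedVertex(x=0.4, y=0.6),
            types.NormalizedVertex(x=0.1, y=0.6),
        ]
    ),
)
```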
+ recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + recall = proto.Field(proto.FLOAT, number=2) + + precision = proto.Field(proto.FLOAT, number=3) + + f1_score = proto.Field(proto.FLOAT, number=4) + + iou_threshold = proto.Field(proto.FLOAT, number=1) + + mean_average_precision = proto.Field(proto.FLOAT, number=2) + + confidence_metrics_entries = proto.RepeatedField( + proto.MESSAGE, number=3, message=ConfidenceMetricsEntry, + ) + + +class ImageObjectDetectionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. + + Attributes: + evaluated_bounding_box_count (int): + Output only. The total number of bounding + boxes (i.e. summed over all images) the ground + truth used to create this evaluation had. + bounding_box_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry]): + Output only. The bounding boxes match metrics + for each Intersection-over-union threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each + label confidence threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + bounding_box_mean_average_precision (float): + Output only. The single metric for bounding boxes + evaluation: the mean_average_precision averaged over all + bounding_box_metrics_entries. + """ + + evaluated_bounding_box_count = proto.Field(proto.INT32, number=1) + + bounding_box_metrics_entries = proto.RepeatedField( + proto.MESSAGE, number=2, message=BoundingBoxMetricsEntry, + ) + + bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3) + + +class VideoObjectTrackingEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for video object tracking problems. + Evaluates prediction quality of both labeled bounding boxes and + labeled tracks (i.e. series of bounding boxes sharing same label + and instance ID). + + Attributes: + evaluated_frame_count (int): + Output only. The number of video frames used + to create this evaluation. + evaluated_bounding_box_count (int): + Output only. The total number of bounding + boxes (i.e. summed over all frames) the ground + truth used to create this evaluation had. + bounding_box_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry]): + Output only. The bounding boxes match metrics + for each Intersection-over-union threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each + label confidence threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + bounding_box_mean_average_precision (float): + Output only. The single metric for bounding boxes + evaluation: the mean_average_precision averaged over all + bounding_box_metrics_entries. 
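Each ``BoundingBoxMetricsEntry`` carries a precision-recall curve per IoU threshold, so choosing the operating point with the best F1 is a short scan. A hypothetical sketch, assuming ``metrics`` is a populated ``ImageObjectDetectionEvaluationMetrics``:

```py
best_iou, best_cme = max(
    (
        (entry.iou_threshold, cme)
        for entry in metrics.bounding_box_metrics_entries
        for cme in entry.confidence_metrics_entries
    ),
    key=lambda pair: pair[1].f1_score,
)
print(
    f"IoU {best_iou}: best F1 {best_cme.f1_score:.3f} "
    f"at confidence {best_cme.confidence_threshold}"
)
```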
+ """ + + evaluated_frame_count = proto.Field(proto.INT32, number=1) + + evaluated_bounding_box_count = proto.Field(proto.INT32, number=2) + + bounding_box_metrics_entries = proto.RepeatedField( + proto.MESSAGE, number=4, message=BoundingBoxMetricsEntry, + ) + + bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=6) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/geometry.py b/google/cloud/automl_v1beta1/types/geometry.py new file mode 100644 index 00000000..7a463691 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/geometry.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={"NormalizedVertex", "BoundingPoly",}, +) + + +class NormalizedVertex(proto.Message): + r"""A vertex represents a 2D point in the image. + The normalized vertex coordinates are between 0 to 1 fractions + relative to the original plane (image, video). E.g. if the plane + (e.g. whole image) would have size 10 x 20 then a point with + normalized coordinates (0.1, 0.3) would be at the position (1, + 6) on that plane. + + Attributes: + x (float): + Required. Horizontal coordinate. + y (float): + Required. Vertical coordinate. + """ + + x = proto.Field(proto.FLOAT, number=1) + + y = proto.Field(proto.FLOAT, number=2) + + +class BoundingPoly(proto.Message): + r"""A bounding polygon of a detected object on a plane. On output both + vertices and normalized_vertices are provided. The polygon is formed + by connecting vertices in the order they are listed. + + Attributes: + normalized_vertices (Sequence[~.geometry.NormalizedVertex]): + Output only . The bounding polygon normalized + vertices. + """ + + normalized_vertices = proto.RepeatedField( + proto.MESSAGE, number=2, message=NormalizedVertex, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/image.py b/google/cloud/automl_v1beta1/types/image.py new file mode 100644 index 00000000..636fa469 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/image.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto  # type: ignore + + +from google.cloud.automl_v1beta1.types import classification + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "ImageClassificationDatasetMetadata", + "ImageObjectDetectionDatasetMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionModelMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageObjectDetectionModelDeploymentMetadata", + }, +) + + +class ImageClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to image classification. + + Attributes: + classification_type (~.classification.ClassificationType): + Required. Type of the classification problem. + """ + + classification_type = proto.Field( + proto.ENUM, number=1, enum=classification.ClassificationType, + ) + + +class ImageObjectDetectionDatasetMetadata(proto.Message): + r"""Dataset metadata specific to image object detection.""" + + +class ImageClassificationModelMetadata(proto.Message): + r"""Model metadata for image classification. + + Attributes: + base_model_id (str): + Optional. The ID of the ``base`` model. If it is specified, + the new model will be created based on the ``base`` model. + Otherwise, the new model will be created from scratch. The + ``base`` model must be in the same ``project`` and + ``location`` as the new model to create, and have the same + ``model_type``. + train_budget (int): + Required. The train budget of creating this model, expressed + in hours. The actual ``train_cost`` will be less than or + equal to this value. + train_cost (int): + Output only. The actual train cost of creating this model, + expressed in hours. If this model is created from a ``base`` + model, the train cost used to create the ``base`` model is + not included. + stop_reason (str): + Output only. The reason that this create model operation + stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. + model_type (str): + Optional. Type of the model. The available values are: + + -  ``cloud`` - Model to be used via prediction calls to + AutoML API. This is the default value. + -  ``mobile-low-latency-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have low latency, but may have + lower prediction quality than other models. + -  ``mobile-versatile-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. + -  ``mobile-high-accuracy-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have a higher latency, but should + also have a higher prediction quality than other models. + -  ``mobile-core-ml-low-latency-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + Expected to have low latency, but may have lower + prediction quality than other models.
+ - ``mobile-core-ml-versatile-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + - ``mobile-core-ml-high-accuracy-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + Expected to have a higher latency, but should also have a + higher prediction quality than other models. + node_qps (float): + Output only. An approximate number of online + prediction QPS that can be supported by this + model per each node on which it is deployed. + node_count (int): + Output only. The number of nodes this model is deployed on. + A node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the node_qps field. + """ + + base_model_id = proto.Field(proto.STRING, number=1) + + train_budget = proto.Field(proto.INT64, number=2) + + train_cost = proto.Field(proto.INT64, number=3) + + stop_reason = proto.Field(proto.STRING, number=5) + + model_type = proto.Field(proto.STRING, number=7) + + node_qps = proto.Field(proto.DOUBLE, number=13) + + node_count = proto.Field(proto.INT64, number=14) + + +class ImageObjectDetectionModelMetadata(proto.Message): + r"""Model metadata specific to image object detection. + + Attributes: + model_type (str): + Optional. Type of the model. The available values are: + + - ``cloud-high-accuracy-1`` - (default) A model to be used + via prediction calls to AutoML API. Expected to have a + higher latency, but should also have a higher prediction + quality than other models. + - ``cloud-low-latency-1`` - A model to be used via + prediction calls to AutoML API. Expected to have low + latency, but may have lower prediction quality than other + models. + - ``mobile-low-latency-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have low latency, but may have + lower prediction quality than other models. + - ``mobile-versatile-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. + - ``mobile-high-accuracy-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have a higher latency, but should + also have a higher prediction quality than other models. + node_count (int): + Output only. The number of nodes this model is deployed on. + A node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the qps_per_node + field. + node_qps (float): + Output only. An approximate number of online + prediction QPS that can be supported by this + model per each node on which it is deployed. + stop_reason (str): + Output only. The reason that this create model operation + stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. 
+ train_budget_milli_node_hours (int): + The train budget of creating this model, expressed in milli + node hours, i.e. a value of 1,000 in this field means 1 node hour. + The actual ``train_cost`` will be less than or equal to this + value. If further model training ceases to provide any + improvements, it will stop without using the full budget and the + stop_reason will be ``MODEL_CONVERGED``. Note, node_hour = + actual_hour \* number_of_nodes_involved. For model type + ``cloud-high-accuracy-1``\ (default) and + ``cloud-low-latency-1``, the train budget must be between + 20,000 and 900,000 milli node hours, inclusive. The default + value is 216,000 which represents one day in wall time. For + model type ``mobile-low-latency-1``, ``mobile-versatile-1``, + ``mobile-high-accuracy-1``, + ``mobile-core-ml-low-latency-1``, + ``mobile-core-ml-versatile-1``, + ``mobile-core-ml-high-accuracy-1``, the train budget must be + between 1,000 and 100,000 milli node hours, inclusive. The + default value is 24,000 which represents one day in wall + time. + train_cost_milli_node_hours (int): + Output only. The actual train cost of + creating this model, expressed in milli node + hours, i.e. a value of 1,000 in this field means 1 + node hour. Guaranteed to not exceed the train + budget. + """ + + model_type = proto.Field(proto.STRING, number=1) + + node_count = proto.Field(proto.INT64, number=3) + + node_qps = proto.Field(proto.DOUBLE, number=4) + + stop_reason = proto.Field(proto.STRING, number=5) + + train_budget_milli_node_hours = proto.Field(proto.INT64, number=6) + + train_cost_milli_node_hours = proto.Field(proto.INT64, number=7) + + +class ImageClassificationModelDeploymentMetadata(proto.Message): + r"""Model deployment metadata specific to Image Classification. + + Attributes: + node_count (int): + Input only. The number of nodes to deploy the model on. A + node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the model's + + [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps]. + Must be between 1 and 100, inclusive on both ends. + """ + + node_count = proto.Field(proto.INT64, number=1) + + +class ImageObjectDetectionModelDeploymentMetadata(proto.Message): + r"""Model deployment metadata specific to Image Object Detection. + + Attributes: + node_count (int): + Input only. The number of nodes to deploy the model on. A + node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the model's + + [qps_per_node][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.qps_per_node]. + Must be between 1 and 100, inclusive on both ends. + """ + + node_count = proto.Field(proto.INT64, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/io.py b/google/cloud/automl_v1beta1/types/io.py new file mode 100644 index 00000000..bb91b204 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/io.py @@ -0,0 +1,1115 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto  # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "InputConfig", + "BatchPredictInputConfig", + "DocumentInputConfig", + "OutputConfig", + "BatchPredictOutputConfig", + "ModelExportOutputConfig", + "ExportEvaluatedExamplesOutputConfig", + "GcsSource", + "BigQuerySource", + "GcsDestination", + "BigQueryDestination", + "GcrDestination", + }, +) + + +class InputConfig(proto.Message): + r"""Input configuration for ImportData Action. + + The format of the input depends on the dataset_metadata of the + Dataset into which the import is happening. As the input source the + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is + expected, unless specified otherwise. Additionally, any input .CSV + file by itself must be 100MB or smaller, unless specified otherwise. + If an "example" file (that is, image, video etc.) with identical + content (even if it has a different GCS_FILE_PATH) is mentioned + multiple times, then its label, bounding boxes etc. are appended. + The same file should always be provided with the same ML_USE and + GCS_FILE_PATH; if it is not, these values are selected + nondeterministically from the given ones. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + -  For Image Classification: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH,LABEL,LABEL,... GCS_FILE_PATH leads to image + of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, + .WEBP, .BMP, .TIFF, .ICO. For MULTICLASS classification type, at + most one LABEL is allowed per image. If an image has not yet been + labeled, then it should be mentioned just once with no LABEL. + Some sample rows: TRAIN,gs://folder/image1.jpg,daisy + TEST,gs://folder/image2.jpg,dandelion,tulip,rose + UNASSIGNED,gs://folder/image3.jpg,daisy + UNASSIGNED,gs://folder/image4.jpg + + -  For Image Object Detection: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) + GCS_FILE_PATH leads to image of up to 30MB in size. Supported + extensions: .JPEG, .GIF, .PNG. Each image is assumed to be + exhaustively labeled. The minimum allowed BOUNDING_BOX edge + length is 0.01, and no more than 500 BOUNDING_BOX-es per image + are allowed (one BOUNDING_BOX is defined per line). If an image + has not yet been labeled, then it should be mentioned just once + with no LABEL and the ",,,,,,," in place of the BOUNDING_BOX. + Images which are known to not contain any bounding boxes should + be labeled explicitly as "NEGATIVE_IMAGE", followed by + ",,,,,,," in place of the BOUNDING_BOX. Sample rows: + TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, + UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 + TEST,gs://folder/im3.png,,,,,,,,, + TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, + + -  For Video Classification: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be + used. The GCS_FILE_PATH should lead to another .csv file which + describes examples that have given ML_USE, using the following + row format: + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) + Here GCS_FILE_PATH leads to a video of up to 50GB in size and up + to 3h duration.
Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length + of the video, and end has to be after the start. Any segment of a + video which has one or more labels on it is considered a hard + negative for all other labels. Any segment with no labels on it + is considered to be unknown. If a whole video is unknown, then it + should be mentioned just once with ",," in place of LABEL, + TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file: + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file + for a particular ML_USE: + gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, + + -  For Video Object Tracking: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be + used. The GCS_FILE_PATH should lead to another .csv file which + describes examples that have given ML_USE, using one of the + following row formats: + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or + GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up + to 50GB in size and up to 3h duration. Supported extensions: + .MOV, .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to + obtain a better model. When a specific labeled entity leaves the + video frame and shows up again later, it is not required, albeit + preferable, that the same INSTANCE_ID be given to it. TIMESTAMP + must be within the length of the video; the BOUNDING_BOX is + assumed to be drawn on the video frame closest to the + TIMESTAMP. Any frame mentioned by a TIMESTAMP is expected to be + exhaustively labeled and no more than 500 BOUNDING_BOX-es per + frame are allowed. If a whole video is unknown, then it should be + mentioned just once with ",,,,,,,,,," in place of LABEL, + [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. Sample top level CSV file: + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a + CSV file for a particular ML_USE: + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, + + -  For Text Extraction: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .JSONL (that is, + JSON Lines) file which either imports text in-line or as + documents. Any given .JSONL file must be 100MB or smaller. The + in-line .JSONL file contains, per line, a proto that wraps a + TextSnippet proto (in json representation) followed by one or + more AnnotationPayload protos (called annotations), which have + display_name and text_extraction detail populated. The given text + is expected to be annotated exhaustively, for example, if you + look for animals and text contains "dolphin" that is not labeled, + then "dolphin" is assumed to not be an animal. Any given text + snippet content must be 10KB or smaller, and also be UTF-8 NFC + encoded (ASCII already is). The document .JSONL file contains, + per line, a proto that wraps a Document proto. The Document proto + must have either document_text or input_config set.
In + document_text case, the Document proto may also contain the + spatial information of the document, including layout, document + dimension and page number. In input_config case, only PDF + documents are supported now, and each document may be up to 2MB + large. Currently, annotations on documents cannot be specified at + import. Three sample CSV rows: TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl + Sample in-line JSON Lines file for entity extraction (presented + here with artificial line breaks, but the only actual line break + is denoted by \\n).: { "document": { "document_text": {"content": + "dog cat"} "layout": [ { "text_segment": { "start_offset": 0, + "end_offset": 3, }, "page_number": 1, "bounding_poly": { + "normalized_vertices": [ {"x": 0.1, "y": 0.1}, {"x": 0.1, "y": + 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, ], }, + "text_segment_type": TOKEN, }, { "text_segment": { + "start_offset": 4, "end_offset": 7, }, "page_number": 1, + "bounding_poly": { "normalized_vertices": [ {"x": 0.4, "y": 0.1}, + {"x": 0.4, "y": 0.3}, {"x": 0.8, "y": 0.3}, {"x": 0.8, "y": 0.1}, + ], }, "text_segment_type": TOKEN, } + + :: + + ], + "document_dimensions": { + "width": 8.27, + "height": 11.69, + "unit": INCH, + } + "page_count": 1, + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": {"text_segment": {"start_offset": 0, + "end_offset": 3}} + }, + { + "display_name": "animal", + "text_extraction": {"text_segment": {"start_offset": 4, + "end_offset": 7}} + } + ], + }\n + { + "text_snippet": { + "content": "This dog is good." + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 8} + } + } + ] + } + Sample document JSON Lines file (presented here with artificial line + breaks, but the only actual line break is denoted by \n).: + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] + } + } + } + } + + - For Text Classification: CSV file(s) with each line in format: + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,... + TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If + the column content is a valid gcs file path, i.e. prefixed by + "gs://", it will be treated as a GCS_FILE_PATH, else if the + content is enclosed within double quotes (""), it is treated as a + TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a + .txt file with UTF-8 encoding, for example, + "gs://folder/content.txt", and the content in it is extracted as + a text snippet. In TEXT_SNIPPET case, the column content + excluding quotes is treated as to be imported text snippet. In + both cases, the text snippet/file size must be within 128kB. + Maximum 100 unique labels are allowed per CSV row. Sample rows: + TRAIN,"They have bad food and very rude",RudeService,BadFood + TRAIN,gs://folder/content.txt,SlowService TEST,"Typically always + bad service there.",RudeService VALIDATE,"Stomach ache to + go.",BadFood + + - For Text Sentiment: CSV file(s) with each line in format: + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and + GCS_FILE_PATH are distinguished by a pattern. If the column + content is a valid gcs file path, that is, prefixed by "gs://", + it is treated as a GCS_FILE_PATH, otherwise it is treated as a + TEXT_SNIPPET. 
In the GCS_FILE_PATH case, the path must lead to a + .txt file with UTF-8 encoding, for example, + "gs://folder/content.txt", and the content in it is extracted as + a text snippet. In TEXT_SNIPPET case, the column content itself + is treated as the text snippet to import. In both cases, the + text snippet must be up to 500 characters long. Sample rows: + TRAIN,"@freewrytin this is way too good for your product",2 + TRAIN,"I need this product so bad",3 TEST,"Thank you for this + product.",4 VALIDATE,gs://folder/content.txt,2 + + -  For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] + or + + [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] + can be used. All inputs are concatenated into a single + + [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] + For gcs_source: CSV file(s), where the first row of the first file + is the header, containing unique column names. If the first row of a + subsequent file is the same as the header, then it is also treated + as a header. All other rows contain values for the corresponding + columns. Each .CSV file by itself must be 10GB or smaller, and their + total size must be 100GB or smaller. First three sample rows of a + CSV file: "Id","First Name","Last Name","Dob","Addresses" + + "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" + + "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} + For bigquery_source: A URI of a BigQuery table. The user data size + of the BigQuery table must be 100GB or smaller. An imported table + must have between 2 and 1,000 columns, inclusive, and between 1,000 + and 100,000,000 rows, inclusive. At most 5 import data jobs can run + in parallel. Definitions: ML_USE = "TRAIN" \| "VALIDATE" \| + "TEST" \| "UNASSIGNED" Describes how the given example (file) should + be used for model training. "UNASSIGNED" can be used when the user has + no preference. GCS_FILE_PATH = A path to file on GCS, e.g. + "gs://folder/image1.png". LABEL = A display name of an object on an + image, video etc., e.g. "dog". Must be up to 32 characters long and + can consist only of ASCII Latin letters A-Z and a-z, underscores(_), + and ASCII digits 0-9. For each label an AnnotationSpec is created + whose display_name becomes the label; AnnotationSpecs are given back + in predictions. INSTANCE_ID = A positive integer that identifies a + specific instance of a labeled entity on an example. Used e.g. to + track two cars on a video while being able to tell apart which one + is which. BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX \| + VERTEX,,,VERTEX,, A rectangle parallel to the frame of the example + (image, video). If 4 vertices are given they are connected by edges + in the order provided; if 2 are given they are recognized as + diagonally opposite vertices of the rectangle. VERTEX = + COORDINATE,COORDINATE First coordinate is horizontal (x), the second + is vertical (y). COORDINATE = A float in the 0 to 1 range, relative to + total length of image or video in given dimension. For fractions the + leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
Point 0,0 is + in the top left. TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses an + end, exclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_OFFSET = A number of seconds as + measured from the start of an example (e.g. video). Fractions are + allowed, up to a microsecond precision. "inf" is allowed, and it + means the end of the example. TEXT_SNIPPET = The content of a text + snippet, UTF-8 encoded, enclosed within double quotes (""). + SENTIMENT = An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). + Describes the ordinal of the sentiment - higher value means a more + positive sentiment. All the values are completely relative, i.e. + neither 0 needs to mean a negative or neutral sentiment nor + sentiment_max needs to mean a positive one - it is just required + that 0 is the least positive sentiment in the data, and + sentiment_max is the most positive one. The SENTIMENT shouldn't be + confused with "score" or "magnitude" from the previous Natural + Language Sentiment Analysis API. All SENTIMENT values between 0 and + sentiment_max must be represented in the imported data. On + prediction the same 0 to sentiment_max range will be used. The + difference between neighboring sentiment values need not be + uniform, e.g. 1 and 2 may be similar whereas the difference between + 2 and 3 may be huge. + + Errors: If any of the provided CSV files can't be parsed or if more + than a certain percent of CSV rows cannot be processed then the + operation fails and nothing is imported. Regardless of overall + success or failure the per-row failures, up to a certain count cap, + are listed in Operation.metadata.partial_failures. + + Attributes: + gcs_source (~.io.GcsSource): + The Google Cloud Storage location for the input content. In + ImportData, the gcs_source points to a csv with structure + described in the comment. + bigquery_source (~.io.BigQuerySource): + The BigQuery location for the input content. + params (Sequence[~.io.InputConfig.ParamsEntry]): + Additional domain-specific parameters describing the + semantics of the imported data, any string must be up to + 25000 characters long. + + -  For Tables: ``schema_inference_version`` - (integer) + Required. The version of the algorithm that should be + used for the initial inference of the schema (columns' + DataTypes) of the table the data is being imported into. + Allowed values: "1". + """ + + gcs_source = proto.Field( + proto.MESSAGE, number=1, oneof="source", message="GcsSource", + ) + + bigquery_source = proto.Field( + proto.MESSAGE, number=3, oneof="source", message="BigQuerySource", + ) + + params = proto.MapField(proto.STRING, proto.STRING, number=2) + + +class BatchPredictInputConfig(proto.Message): + r"""Input configuration for BatchPredict Action. + + The format of input depends on the ML problem of the model used for + prediction. As input source the + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is + expected, unless specified otherwise. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + -  For Image Classification: CSV file(s) with each line having just + a single column: GCS_FILE_PATH which leads to image of up to 30MB + in size. Supported extensions: .JPEG, .GIF, .PNG.
This path is + treated as the ID in the Batch predict output. Three sample rows: + gs://folder/image1.jpeg gs://folder/image2.gif + gs://folder/image3.png + + -  For Image Object Detection: CSV file(s) with each line having + just a single column: GCS_FILE_PATH which leads to image of up to + 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. This path + is treated as the ID in the Batch predict output. Three sample + rows: gs://folder/image1.jpeg gs://folder/image2.gif + gs://folder/image3.png + + -  For Video Classification: CSV file(s) with each line in format: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH + leads to video of up to 50GB in size and up to 3h duration. + Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length + of the video, and end has to be after the start. Three sample + rows: gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf + + -  For Video Object Tracking: CSV file(s) with each line in format: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH + leads to video of up to 50GB in size and up to 3h duration. + Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length + of the video, and end has to be after the start. Three sample + rows: gs://folder/video1.mp4,10,240 + gs://folder/video1.mp4,300,360 gs://folder/vid2.mov,0,inf + + -  For Text Classification: CSV file(s) with each line having just a + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file + can have size up to 128kB. Any given text snippet content must + have 60,000 characters or less. Three sample rows: + gs://folder/text1.txt "Some text content to predict" + gs://folder/text3.pdf Supported file extensions: .txt, .pdf + + -  For Text Sentiment: CSV file(s) with each line having just a + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file + can have size up to 128kB. Any given text snippet content must + have 500 characters or less. Three sample rows: + gs://folder/text1.txt "Some text content to predict" + gs://folder/text3.pdf Supported file extensions: .txt, .pdf + + -  For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either + provide text in-line or as documents (for a single BatchPredict + call only one of these formats may be used). The in-line + .JSONL file(s) contain per line a proto that wraps a temporary + user-assigned TextSnippet ID (string up to 2000 characters long) + called "id", a TextSnippet proto (in json representation) and + zero or more TextFeature protos. Any given text snippet content + must have 30,000 characters or less, and also be UTF-8 NFC + encoded (ASCII already is). The IDs provided should be unique. + The document .JSONL file(s) contain, per line, a proto that wraps + a Document proto with input_config set. Only PDF documents are + supported now, and each document must be at most 2MB in size. Any + given .JSONL file must be 100MB or smaller, and no more than 20 + files may be given.
Sample in-line JSON Lines file (presented + here with artificial line breaks, but the only actual line break + is denoted by \\n): { "id": "my_first_id", "text_snippet": { + "content": "dog car cat"}, "text_features": [ { "text_segment": + {"start_offset": 4, "end_offset": 6}, "structural_type": + PARAGRAPH, "bounding_poly": { "normalized_vertices": [ {"x": 0.1, + "y": 0.1}, {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, + "y": 0.1}, ] }, } ], }\n { "id": "2", "text_snippet": { + "content": "An elaborate content", "mime_type": "text/plain" } } + Sample document JSON Lines file (presented here with artificial + line breaks, but the only actual line break is denoted by \\n).: + { "document": { "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document1.pdf" ] } } } }\n { "document": { + "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document2.pdf" ] } } } } + + - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] + or + + [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. + GCS case: CSV file(s), each by itself 10GB or smaller and total size + must be 100GB or smaller, where first file must have a header + containing column names. If the first row of a subsequent file is + the same as the header, then it is also treated as a header. All + other rows contain values for the corresponding columns. The column + names must contain the model's + + [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn't matter). The columns corresponding to the model's + input feature column specs must contain values compatible with the + column spec's data types. Prediction on all the rows, i.e. the CSV + lines, will be attempted. For FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + all columns having + + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] + type will be ignored. First three sample rows of a CSV file: "First + Name","Last Name","Dob","Addresses" + + "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" + + "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} + BigQuery case: An URI of a BigQuery table. The user data size of the + BigQuery table must be 100GB or smaller. The column names must + contain the model's + + [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn't matter). The columns corresponding to the model's + input feature column specs must contain values compatible with the + column spec's data types. Prediction on all the rows of the table + will be attempted. 
For FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + all columns having + + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] + type will be ignored. + + Definitions: GCS_FILE_PATH = A path to file on GCS, e.g. + "gs://folder/video.avi". TEXT_SNIPPET = A content of a text snippet, + UTF-8 encoded, enclosed within double quotes ("") TIME_SEGMENT_START + = TIME_OFFSET Expresses a beginning, inclusive, of a time segment + within an example that has a time dimension (e.g. video). + TIME_SEGMENT_END = TIME_OFFSET Expresses an end, exclusive, of a + time segment within an example that has a time dimension (e.g. + video). TIME_OFFSET = A number of seconds as measured from the start + of an example (e.g. video). Fractions are allowed, up to a + microsecond precision. "inf" is allowed and it means the end of the + example. + + Errors: If any of the provided CSV files can't be parsed or if more + than certain percent of CSV rows cannot be processed then the + operation fails and prediction does not happen. Regardless of + overall success or failure the per-row failures, up to a certain + count cap, will be listed in Operation.metadata.partial_failures. + + Attributes: + gcs_source (~.io.GcsSource): + The Google Cloud Storage location for the + input content. + bigquery_source (~.io.BigQuerySource): + The BigQuery location for the input content. + """ + + gcs_source = proto.Field( + proto.MESSAGE, number=1, oneof="source", message="GcsSource", + ) + + bigquery_source = proto.Field( + proto.MESSAGE, number=2, oneof="source", message="BigQuerySource", + ) + + +class DocumentInputConfig(proto.Message): + r"""Input configuration of a + [Document][google.cloud.automl.v1beta1.Document]. + + Attributes: + gcs_source (~.io.GcsSource): + The Google Cloud Storage location of the + document file. Only a single path should be + given. Max supported size: 512MB. + Supported extensions: .PDF. + """ + + gcs_source = proto.Field(proto.MESSAGE, number=1, message="GcsSource",) + + +class OutputConfig(proto.Message): + r"""- For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file + which describes examples that have given ML_USE, using the + following row format per line: TEXT_SNIPPET (in source language) + \\t TEXT_SNIPPET (in target language) + + - For Tables: Output depends on whether the dataset was imported + from GCS or BigQuery. GCS case: + + [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] + must be set. Exported are CSV file(s) ``tables_1.csv``, + ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as header + line the table's column names, and all other lines contain values + for the header columns. BigQuery case: + + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a + new dataset will be created with name + + ``export_data__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be + in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that + dataset a new table called ``primary_table`` will be created, and + filled with precisely the same data as this obtained on import. + + Attributes: + gcs_destination (~.io.GcsDestination): + The Google Cloud Storage location where the output is to be + written to. 
For Image Object Detection, Text Extraction, + Video Classification and Tables, in the given directory a + new directory will be created with name: export_data-- where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. + All export output will be written into that directory. + bigquery_destination (~.io.BigQueryDestination): + The BigQuery location where the output is to + be written to. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, number=1, oneof="destination", message="GcsDestination", + ) + + bigquery_destination = proto.Field( + proto.MESSAGE, number=2, oneof="destination", message="BigQueryDestination", + ) + + +class BatchPredictOutputConfig(proto.Message): + r"""Output configuration for BatchPredict Action. + + As destination the + + [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] + must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory + is created. Its name will be "prediction--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends + on the ML problem the predictions are made for. + + - For Image Classification: In the created directory files + ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. A + single image will be listed only once with all its annotations, + and its annotations will never be split across files. Each .JSONL + file will contain, per line, a JSON representation of a proto + that wraps image's "ID" : "" followed by a list of zero + or more AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image + failed (partially or completely), then an additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``\ fields. + + - For Image Object Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. Each + .JSONL file will contain, per line, a JSON representation of a + proto that wraps image's "ID" : "" followed by a list + of zero or more AnnotationPayload protos (called annotations), + which have image_object_detection detail populated. A single + image will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``\ fields. 
+ + -  For Video Classification: In the created directory a + video_classification.csv file, and a .JSON file for each video + classification requested in the input (i.e. each line in the given + CSV(s)), will be created. + + :: + + The format of video_classification.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 + to 1 the prediction input lines (i.e. video_classification.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = + "OK" if prediction completed successfully, or an error code with + message otherwise. If STATUS is not "OK" then the .JSON file for + that line may not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for the video time segment the file is assigned to in the + video_classification.csv. All AnnotationPayload protos will have + video_classification field set, and will be sorted by + video_classification.type field (note that the returned types are + governed by `classification_types` parameter in + [PredictService.BatchPredictRequest.params][]). + + -  For Video Object Tracking: In the created directory a + video_object_tracking.csv file will be created, and multiple + files video_object_tracking_1.json, + video_object_tracking_2.json,..., video_object_tracking_N.json, + where N is the number of requests in the input (i.e. the number + of lines in the given CSV(s)). + + :: + + The format of video_object_tracking.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 + to 1 the prediction input lines (i.e. video_object_tracking.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = + "OK" if prediction completed successfully, or an error code with + message otherwise. If STATUS is not "OK" then the .JSON file for + that line may not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for each frame of the video time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. + + -  For Text Classification: In the created directory files + ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text snippet or input text file and a list of + zero or more AnnotationPayload protos (called annotations), which + have classification detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files.
+ + If prediction for any text snippet or file failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Text Sentiment: In the created directory files + ``text_sentiment_1.jsonl``, + ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will + be created, where N may be 1, and depends on the total number of + inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text snippet or input text file and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_sentiment detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any text snippet or file failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Text Extraction: In the created directory files + ``text_extraction_1.jsonl``, + ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. The contents of these + .JSONL file(s) depend on whether the input used inline text, or + documents. If input was inline, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request text snippet's "id" (if specified), followed by + input text snippet, and a list of zero or more AnnotationPayload + protos (called annotations), which have text_extraction detail + populated. A single text snippet will be listed only once with + all its annotations, and its annotations will never be split + across files. If input used documents, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request document proto, followed by its OCR-ed + representation in the form of a text snippet, finally followed by + a list of zero or more AnnotationPayload protos (called + annotations), which have text_extraction detail populated and + refer, via their indices, to the OCR-ed text snippet. A single + document (and its text snippet) will be listed only once with all + its annotations, and its annotations will never be split across + files. If prediction for any text snippet failed (partially or + completely), then additional ``errors_1.jsonl``, + ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created + (N depends on total number of failed predictions). 
These files + will have a JSON representation of a proto that wraps either the + "id" : "" (in case of inline) or the document proto (in + case of document) but here followed by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Tables: Output depends on whether + + [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] + or + + [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] + is set (either is allowed). GCS case: In the created directory files + ``tables_1.csv``, ``tables_2.csv``,..., ``tables_N.csv`` will be + created, where N may be 1, and depends on the total number of the + successfully predicted rows. For all CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + given on input followed by M target column names in the format of + + "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>\_\_score" + where M is the number of distinct target values, i.e. number of + distinct values in the target column of the table used to train the + model. Subsequent lines will contain the respective values of + successfully predicted rows, with the last, i.e. the target, columns + having the corresponding prediction + [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. For + REGRESSION and FORECASTING + + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + [display_name-s][google.cloud.automl.v1beta1.display_name] given on + input followed by the predicted target column with name in the + format of + + "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" + Subsequent lines will contain the respective values of successfully + predicted rows, with the last, i.e. the target, column having the + predicted target value. If prediction for any rows failed, then an + additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv`` + will be created (N depends on total number of failed rows). These + files will have analogous format as ``tables_*.csv``, but always + with a single target column having + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + represented as a JSON string, and containing only ``code`` and + ``message``. BigQuery case: + + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a + new dataset will be created with name + ``prediction__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be + in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and ``errors``. 
+ The ``predictions`` table's column names will be the input columns' + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + followed by the target column with name in the format of + + "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" + The input feature columns will contain the respective values of + successfully predicted rows, with the target column having an ARRAY + of + + [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. + The ``errors`` table contains rows for which the prediction has + failed, it has analogous input columns while the target column name + is in the format of + + "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", + and as a value has + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + represented as a STRUCT, and containing only ``code`` and + ``message``. + + Attributes: + gcs_destination (~.io.GcsDestination): + The Google Cloud Storage location of the + directory where the output is to be written to. + bigquery_destination (~.io.BigQueryDestination): + The BigQuery location where the output is to + be written to. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, number=1, oneof="destination", message="GcsDestination", + ) + + bigquery_destination = proto.Field( + proto.MESSAGE, number=2, oneof="destination", message="BigQueryDestination", + ) + + +class ModelExportOutputConfig(proto.Message): + r"""Output configuration for ModelExport Action. + + Attributes: + gcs_destination (~.io.GcsDestination): + The Google Cloud Storage location where the model is to be + written to. This location may only be set for the following + model formats: "tflite", "edgetpu_tflite", "tf_saved_model", + "tf_js", "core_ml". + + Under the directory given as the destination a new one with + name "model-export--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. + Inside the model and any of its supporting files will be + written. + gcr_destination (~.io.GcrDestination): + The GCR location where model image is to be + pushed to. This location may only be set for the + following model formats: "docker". + + The model image will be created under the given + URI. + model_format (str): + The format in which the model must be exported. The + available, and default, formats depend on the problem and + model type (if given problem and type combination doesn't + have a format listed, it means its models are not + exportable): + + - For Image Classification mobile-low-latency-1, + mobile-versatile-1, mobile-high-accuracy-1: "tflite" + (default), "edgetpu_tflite", "tf_saved_model", "tf_js", + "docker". + + - For Image Classification mobile-core-ml-low-latency-1, + mobile-core-ml-versatile-1, + mobile-core-ml-high-accuracy-1: "core_ml" (default). + Formats description: + + - tflite - Used for Android mobile devices. + + - edgetpu_tflite - Used for `Edge + TPU `__ devices. + + - tf_saved_model - A tensorflow model in SavedModel format. + + - tf_js - A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + + - docker - Used for Docker containers. 
+
+
+class ExportEvaluatedExamplesOutputConfig(proto.Message):
+    r"""Output configuration for ExportEvaluatedExamples Action. Note that
+    this call is available only for 30 days since the moment the model
+    was evaluated. The output depends on the domain, as follows (note
+    that only examples from the TEST set are exported):
+
+    -  For Tables:
+       [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
+       pointing to a BigQuery project must be set. In the given project a
+       new dataset will be created with name
+       ``export_evaluated_examples_<model-display-name>_<timestamp-of-export-call>``
+       where <model-display-name> will be made BigQuery-dataset-name
+       compatible (e.g. most special characters will become underscores),
+       and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on
+       ISO-8601" format. In the dataset an ``evaluated_examples`` table
+       will be created. It will have all the same columns as the
+       [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]
+       of the [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from
+       which the model was created, as they were at the moment of the
+       model's evaluation (this includes the target column with its ground
+       truth), followed by a column called
+       "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
+       [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>".
+       That last column will contain the model's prediction result for
+       each respective row, given as ARRAY of
+       [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
+       represented as STRUCT-s, containing
+       [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
+
+    Attributes:
+        bigquery_destination (~.io.BigQueryDestination):
+            The BigQuery location where the output is to
+            be written to.
+    """
+
+    bigquery_destination = proto.Field(
+        proto.MESSAGE, number=2, oneof="destination", message="BigQueryDestination",
+    )
+
+
+class GcsSource(proto.Message):
+    r"""The Google Cloud Storage location for the input content.
+
+    Attributes:
+        input_uris (Sequence[str]):
+            Required. Google Cloud Storage URIs to input files, up to
+            2000 characters long. Accepted forms:
+
+            -  Full object path, e.g. gs://bucket/directory/object.csv
+    """
+
+    input_uris = proto.RepeatedField(proto.STRING, number=1)
+
+
+class BigQuerySource(proto.Message):
+    r"""The BigQuery location for the input content.
+
+    Attributes:
+        input_uri (str):
+            Required. BigQuery URI to a table, up to 2000 characters
+            long. Accepted forms:
+
+            -  BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
+    """
+
+    input_uri = proto.Field(proto.STRING, number=1)
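Tying this Tables-only message together with the destination types below, here is a sketch of exporting a model's evaluated examples to BigQuery (placeholder project and model IDs; as noted above, the call only succeeds within 30 days of the model's evaluation):

```py
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()

output_config = automl.ExportEvaluatedExamplesOutputConfig(
    bigquery_destination=automl.BigQueryDestination(output_uri="bq://PROJECT_ID")
)

# Creates the export_evaluated_examples_* dataset described above.
operation = client.export_evaluated_examples(
    request={
        "name": "projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
        "output_config": output_config,
    }
)
operation.result()
```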
+
+
+class GcsDestination(proto.Message):
+    r"""The Google Cloud Storage location where the output is to be
+    written to.
+
+    Attributes:
+        output_uri_prefix (str):
+            Required. Google Cloud Storage URI to output directory, up
+            to 2000 characters long. Accepted forms:
+
+            -  Prefix path: gs://bucket/directory The requesting user
+               must have write permission to the bucket. The directory
+               is created if it doesn't exist.
+    """
+
+    output_uri_prefix = proto.Field(proto.STRING, number=1)
+
+
+class BigQueryDestination(proto.Message):
+    r"""The BigQuery location for the output content.
+
+    Attributes:
+        output_uri (str):
+            Required. BigQuery URI to a project, up to 2000 characters
+            long. Accepted forms:
+
+            -  BigQuery path e.g. bq://projectId
+    """
+
+    output_uri = proto.Field(proto.STRING, number=1)
+
+
+class GcrDestination(proto.Message):
+    r"""The GCR location where the image must be pushed to.
+
+    Attributes:
+        output_uri (str):
+            Required. Google Container Registry URI of the new image, up
+            to 2000 characters long. See
+            https://cloud.google.com/container-registry/docs/pushing-and-pulling#pushing_an_image_to_a_registry
+            Accepted forms:
+
+            -  [HOSTNAME]/[PROJECT-ID]/[IMAGE]
+            -  [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
+
+            The requesting user must have permission to push images to
+            the project.
+    """
+
+    output_uri = proto.Field(proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/model.py b/google/cloud/automl_v1beta1/types/model.py
new file mode 100644
index 00000000..2b22cc72
--- /dev/null
+++ b/google/cloud/automl_v1beta1/types/model.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1beta1.types import image
+from google.cloud.automl_v1beta1.types import tables
+from google.cloud.automl_v1beta1.types import text
+from google.cloud.automl_v1beta1.types import translation
+from google.cloud.automl_v1beta1.types import video
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(package="google.cloud.automl.v1beta1", manifest={"Model",},)
+
+
+class Model(proto.Message):
+    r"""API proto representing a trained machine learning model.
+
+    Attributes:
+        translation_model_metadata (~.translation.TranslationModelMetadata):
+            Metadata for translation models.
+        image_classification_model_metadata (~.image.ImageClassificationModelMetadata):
+            Metadata for image classification models.
+        text_classification_model_metadata (~.text.TextClassificationModelMetadata):
+            Metadata for text classification models.
+        image_object_detection_model_metadata (~.image.ImageObjectDetectionModelMetadata):
+            Metadata for image object detection models.
+ video_classification_model_metadata (~.video.VideoClassificationModelMetadata): + Metadata for video classification models. + video_object_tracking_model_metadata (~.video.VideoObjectTrackingModelMetadata): + Metadata for video object tracking models. + text_extraction_model_metadata (~.text.TextExtractionModelMetadata): + Metadata for text extraction models. + tables_model_metadata (~.tables.TablesModelMetadata): + Metadata for Tables models. + text_sentiment_model_metadata (~.text.TextSentimentModelMetadata): + Metadata for text sentiment models. + name (str): + Output only. Resource name of the model. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + display_name (str): + Required. The name of the model to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. It must start with a letter. + dataset_id (str): + Required. The resource ID of the dataset used + to create the model. The dataset must come from + the same ancestor project and location. + create_time (~.timestamp.Timestamp): + Output only. Timestamp when the model + training finished and can be used for + prediction. + update_time (~.timestamp.Timestamp): + Output only. Timestamp when this model was + last updated. + deployment_state (~.model.Model.DeploymentState): + Output only. Deployment state of the model. A + model can only serve prediction requests after + it gets deployed. + """ + + class DeploymentState(proto.Enum): + r"""Deployment state of the model.""" + DEPLOYMENT_STATE_UNSPECIFIED = 0 + DEPLOYED = 1 + UNDEPLOYED = 2 + + translation_model_metadata = proto.Field( + proto.MESSAGE, + number=15, + oneof="model_metadata", + message=translation.TranslationModelMetadata, + ) + + image_classification_model_metadata = proto.Field( + proto.MESSAGE, + number=13, + oneof="model_metadata", + message=image.ImageClassificationModelMetadata, + ) + + text_classification_model_metadata = proto.Field( + proto.MESSAGE, + number=14, + oneof="model_metadata", + message=text.TextClassificationModelMetadata, + ) + + image_object_detection_model_metadata = proto.Field( + proto.MESSAGE, + number=20, + oneof="model_metadata", + message=image.ImageObjectDetectionModelMetadata, + ) + + video_classification_model_metadata = proto.Field( + proto.MESSAGE, + number=23, + oneof="model_metadata", + message=video.VideoClassificationModelMetadata, + ) + + video_object_tracking_model_metadata = proto.Field( + proto.MESSAGE, + number=21, + oneof="model_metadata", + message=video.VideoObjectTrackingModelMetadata, + ) + + text_extraction_model_metadata = proto.Field( + proto.MESSAGE, + number=19, + oneof="model_metadata", + message=text.TextExtractionModelMetadata, + ) + + tables_model_metadata = proto.Field( + proto.MESSAGE, + number=24, + oneof="model_metadata", + message=tables.TablesModelMetadata, + ) + + text_sentiment_model_metadata = proto.Field( + proto.MESSAGE, + number=22, + oneof="model_metadata", + message=text.TextSentimentModelMetadata, + ) + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + dataset_id = proto.Field(proto.STRING, number=3) + + create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,) + + deployment_state = proto.Field(proto.ENUM, number=8, enum=DeploymentState,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git 
a/google/cloud/automl_v1beta1/types/model_evaluation.py b/google/cloud/automl_v1beta1/types/model_evaluation.py
new file mode 100644
index 00000000..2027bb8a
--- /dev/null
+++ b/google/cloud/automl_v1beta1/types/model_evaluation.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1beta1.types import classification
+from google.cloud.automl_v1beta1.types import detection
+from google.cloud.automl_v1beta1.types import regression
+from google.cloud.automl_v1beta1.types import text_extraction
+from google.cloud.automl_v1beta1.types import text_sentiment
+from google.cloud.automl_v1beta1.types import translation
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1beta1", manifest={"ModelEvaluation",},
+)
+
+
+class ModelEvaluation(proto.Message):
+    r"""Evaluation results of a model.
+
+    Attributes:
+        classification_evaluation_metrics (~.classification.ClassificationEvaluationMetrics):
+            Model evaluation metrics for image, text,
+            video and tables classification.
+            Tables problem is considered a classification
+            when the target column is CATEGORY DataType.
+        regression_evaluation_metrics (~.regression.RegressionEvaluationMetrics):
+            Model evaluation metrics for Tables
+            regression. Tables problem is considered a
+            regression when the target column has FLOAT64
+            DataType.
+        translation_evaluation_metrics (~.translation.TranslationEvaluationMetrics):
+            Model evaluation metrics for translation.
+        image_object_detection_evaluation_metrics (~.detection.ImageObjectDetectionEvaluationMetrics):
+            Model evaluation metrics for image object
+            detection.
+        video_object_tracking_evaluation_metrics (~.detection.VideoObjectTrackingEvaluationMetrics):
+            Model evaluation metrics for video object
+            tracking.
+        text_sentiment_evaluation_metrics (~.text_sentiment.TextSentimentEvaluationMetrics):
+            Evaluation metrics for text sentiment models.
+        text_extraction_evaluation_metrics (~.text_extraction.TextExtractionEvaluationMetrics):
+            Evaluation metrics for text extraction
+            models.
+        name (str):
+            Output only. Resource name of the model evaluation. Format:
+
+            ``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}``
+        annotation_spec_id (str):
+            Output only. The ID of the annotation spec that the model
+            evaluation applies to. The ID is empty for the overall
+            model evaluation. For Tables, annotation specs do not exist
+            in the dataset and this ID is always not set; for
+            CLASSIFICATION
+            [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
+            the
+            [display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name]
+            field is used instead.
+        display_name (str):
+            Output only. The value of
+            [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name]
+            at the moment when the model was trained. Because this field
+            returns a value at model training time, the values may differ
+            for different models trained from the same dataset, since
+            display names could have been changed between the two models'
+            trainings. For Tables CLASSIFICATION
+            [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
+            distinct values of the target column at the moment of the
+            model evaluation are populated here. The display_name is
+            empty for the overall model evaluation.
+        create_time (~.timestamp.Timestamp):
+            Output only. Timestamp when this model
+            evaluation was created.
+        evaluated_example_count (int):
+            Output only. The number of examples used for model
+            evaluation, i.e. for which ground truth from time of model
+            creation is compared against the predicted annotations
+            created by the model. For the overall ModelEvaluation (i.e.
+            with annotation_spec_id not set) this is the total number of
+            all examples used for evaluation. Otherwise, this is the
+            count of examples that according to the ground truth were
+            annotated by the
+            [annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id].
+    """
+
+    classification_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        oneof="metrics",
+        message=classification.ClassificationEvaluationMetrics,
+    )
+
+    regression_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=24,
+        oneof="metrics",
+        message=regression.RegressionEvaluationMetrics,
+    )
+
+    translation_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=9,
+        oneof="metrics",
+        message=translation.TranslationEvaluationMetrics,
+    )
+
+    image_object_detection_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=12,
+        oneof="metrics",
+        message=detection.ImageObjectDetectionEvaluationMetrics,
+    )
+
+    video_object_tracking_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=14,
+        oneof="metrics",
+        message=detection.VideoObjectTrackingEvaluationMetrics,
+    )
+
+    text_sentiment_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        oneof="metrics",
+        message=text_sentiment.TextSentimentEvaluationMetrics,
+    )
+
+    text_extraction_evaluation_metrics = proto.Field(
+        proto.MESSAGE,
+        number=13,
+        oneof="metrics",
+        message=text_extraction.TextExtractionEvaluationMetrics,
+    )
+
+    name = proto.Field(proto.STRING, number=1)
+
+    annotation_spec_id = proto.Field(proto.STRING, number=2)
+
+    display_name = proto.Field(proto.STRING, number=15)
+
+    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
+
+    evaluated_example_count = proto.Field(proto.INT32, number=6)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
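To see how these fields surface in practice, a sketch of iterating over a model's evaluations (placeholder IDs; an empty filter returns the overall evaluation plus the per-annotation-spec ones):

```py
from google.cloud import automl_v1beta1 as automl

client = automl.AutoMlClient()

model_name = "projects/PROJECT_ID/locations/us-central1/models/MODEL_ID"
for evaluation in client.list_model_evaluations(
    request={"parent": model_name, "filter": ""}
):
    # annotation_spec_id is unset for the overall evaluation.
    print(
        evaluation.name,
        evaluation.annotation_spec_id,
        evaluation.evaluated_example_count,
    )
```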
diff --git a/google/cloud/automl_v1beta1/types/operations.py b/google/cloud/automl_v1beta1/types/operations.py
new file mode 100644
index 00000000..62d893fd
--- /dev/null
+++ b/google/cloud/automl_v1beta1/types/operations.py
@@ -0,0 +1,308 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1beta1.types import io
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+from google.rpc import status_pb2 as status  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1beta1",
+    manifest={
+        "OperationMetadata",
+        "DeleteOperationMetadata",
+        "DeployModelOperationMetadata",
+        "UndeployModelOperationMetadata",
+        "CreateModelOperationMetadata",
+        "ImportDataOperationMetadata",
+        "ExportDataOperationMetadata",
+        "BatchPredictOperationMetadata",
+        "ExportModelOperationMetadata",
+        "ExportEvaluatedExamplesOperationMetadata",
+    },
+)
+
+
+class OperationMetadata(proto.Message):
+    r"""Metadata used across all long running operations returned by
+    AutoML API.
+
+    Attributes:
+        delete_details (~.operations.DeleteOperationMetadata):
+            Details of a Delete operation.
+        deploy_model_details (~.operations.DeployModelOperationMetadata):
+            Details of a DeployModel operation.
+        undeploy_model_details (~.operations.UndeployModelOperationMetadata):
+            Details of an UndeployModel operation.
+        create_model_details (~.operations.CreateModelOperationMetadata):
+            Details of CreateModel operation.
+        import_data_details (~.operations.ImportDataOperationMetadata):
+            Details of ImportData operation.
+        batch_predict_details (~.operations.BatchPredictOperationMetadata):
+            Details of BatchPredict operation.
+        export_data_details (~.operations.ExportDataOperationMetadata):
+            Details of ExportData operation.
+        export_model_details (~.operations.ExportModelOperationMetadata):
+            Details of ExportModel operation.
+        export_evaluated_examples_details (~.operations.ExportEvaluatedExamplesOperationMetadata):
+            Details of ExportEvaluatedExamples operation.
+        progress_percent (int):
+            Output only. Progress of operation. Range: [0, 100]. Not
+            used currently.
+        partial_failures (Sequence[~.status.Status]):
+            Output only. Partial failures encountered.
+            E.g. single files that couldn't be read.
+            This field should never exceed 20 entries.
+            Status details field will contain standard GCP
+            error details.
+        create_time (~.timestamp.Timestamp):
+            Output only. Time when the operation was
+            created.
+        update_time (~.timestamp.Timestamp):
+            Output only. Time when the operation was
+            updated for the last time.
+ """ + + delete_details = proto.Field( + proto.MESSAGE, number=8, oneof="details", message="DeleteOperationMetadata", + ) + + deploy_model_details = proto.Field( + proto.MESSAGE, + number=24, + oneof="details", + message="DeployModelOperationMetadata", + ) + + undeploy_model_details = proto.Field( + proto.MESSAGE, + number=25, + oneof="details", + message="UndeployModelOperationMetadata", + ) + + create_model_details = proto.Field( + proto.MESSAGE, + number=10, + oneof="details", + message="CreateModelOperationMetadata", + ) + + import_data_details = proto.Field( + proto.MESSAGE, + number=15, + oneof="details", + message="ImportDataOperationMetadata", + ) + + batch_predict_details = proto.Field( + proto.MESSAGE, + number=16, + oneof="details", + message="BatchPredictOperationMetadata", + ) + + export_data_details = proto.Field( + proto.MESSAGE, + number=21, + oneof="details", + message="ExportDataOperationMetadata", + ) + + export_model_details = proto.Field( + proto.MESSAGE, + number=22, + oneof="details", + message="ExportModelOperationMetadata", + ) + + export_evaluated_examples_details = proto.Field( + proto.MESSAGE, + number=26, + oneof="details", + message="ExportEvaluatedExamplesOperationMetadata", + ) + + progress_percent = proto.Field(proto.INT32, number=13) + + partial_failures = proto.RepeatedField( + proto.MESSAGE, number=2, message=status.Status, + ) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities.""" + + +class DeployModelOperationMetadata(proto.Message): + r"""Details of DeployModel operation.""" + + +class UndeployModelOperationMetadata(proto.Message): + r"""Details of UndeployModel operation.""" + + +class CreateModelOperationMetadata(proto.Message): + r"""Details of CreateModel operation.""" + + +class ImportDataOperationMetadata(proto.Message): + r"""Details of ImportData operation.""" + + +class ExportDataOperationMetadata(proto.Message): + r"""Details of ExportData operation. + + Attributes: + output_info (~.operations.ExportDataOperationMetadata.ExportDataOutputInfo): + Output only. Information further describing + this export data's output. + """ + + class ExportDataOutputInfo(proto.Message): + r"""Further describes this export data's output. Supplements + [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the exported data + is written. + bigquery_output_dataset (str): + The path of the BigQuery dataset created, in + bq://projectId.bqDatasetId format, into which + the exported data is written. + """ + + gcs_output_directory = proto.Field( + proto.STRING, number=1, oneof="output_location" + ) + + bigquery_output_dataset = proto.Field( + proto.STRING, number=2, oneof="output_location" + ) + + output_info = proto.Field(proto.MESSAGE, number=1, message=ExportDataOutputInfo,) + + +class BatchPredictOperationMetadata(proto.Message): + r"""Details of BatchPredict operation. + + Attributes: + input_config (~.io.BatchPredictInputConfig): + Output only. The input config that was given + upon starting this batch predict operation. + output_info (~.operations.BatchPredictOperationMetadata.BatchPredictOutputInfo): + Output only. Information further describing + this batch predict's output. 
+ """ + + class BatchPredictOutputInfo(proto.Message): + r"""Further describes this batch predict's output. Supplements + + [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the prediction + output is written. + bigquery_output_dataset (str): + The path of the BigQuery dataset created, in + bq://projectId.bqDatasetId format, into which + the prediction output is written. + """ + + gcs_output_directory = proto.Field( + proto.STRING, number=1, oneof="output_location" + ) + + bigquery_output_dataset = proto.Field( + proto.STRING, number=2, oneof="output_location" + ) + + input_config = proto.Field( + proto.MESSAGE, number=1, message=io.BatchPredictInputConfig, + ) + + output_info = proto.Field(proto.MESSAGE, number=2, message=BatchPredictOutputInfo,) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of ExportModel operation. + + Attributes: + output_info (~.operations.ExportModelOperationMetadata.ExportModelOutputInfo): + Output only. Information further describing + the output of this model export. + """ + + class ExportModelOutputInfo(proto.Message): + r"""Further describes the output of model export. Supplements + + [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the model will be + exported. + """ + + gcs_output_directory = proto.Field(proto.STRING, number=1) + + output_info = proto.Field(proto.MESSAGE, number=2, message=ExportModelOutputInfo,) + + +class ExportEvaluatedExamplesOperationMetadata(proto.Message): + r"""Details of EvaluatedExamples operation. + + Attributes: + output_info (~.operations.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo): + Output only. Information further describing + the output of this evaluated examples export. + """ + + class ExportEvaluatedExamplesOutputInfo(proto.Message): + r"""Further describes the output of the evaluated examples export. + Supplements + + [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. + + Attributes: + bigquery_output_dataset (str): + The path of the BigQuery dataset created, in + bq://projectId.bqDatasetId format, into which + the output of export evaluated examples is + written. + """ + + bigquery_output_dataset = proto.Field(proto.STRING, number=2) + + output_info = proto.Field( + proto.MESSAGE, number=2, message=ExportEvaluatedExamplesOutputInfo, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/prediction_service.py b/google/cloud/automl_v1beta1/types/prediction_service.py new file mode 100644 index 00000000..4ea8fb68 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/prediction_service.py @@ -0,0 +1,257 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import io + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "PredictRequest", + "PredictResponse", + "BatchPredictRequest", + "BatchPredictResult", + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + Attributes: + name (str): + Required. Name of the model requested to + serve the prediction. + payload (~.data_items.ExamplePayload): + Required. Payload to perform a prediction on. + The payload must match the problem type that the + model was trained to solve. + params (Sequence[~.prediction_service.PredictRequest.ParamsEntry]): + Additional domain-specific parameters, any string must be up + to 25000 characters long. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + - For Image Object Detection: ``score_threshold`` - (float) + When Model detects objects on the image, it will only + produce bounding boxes which have at least this + confidence score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than this + number of bounding boxes will be returned in the + response. Default is 100, the requested value may be + limited by server. + + - For Tables: feature_importance - (boolean) Whether + feature importance should be populated in the returned + TablesAnnotation. The default is false. + """ + + name = proto.Field(proto.STRING, number=1) + + payload = proto.Field(proto.MESSAGE, number=2, message=data_items.ExamplePayload,) + + params = proto.MapField(proto.STRING, proto.STRING, number=3) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + Attributes: + payload (Sequence[~.annotation_payload.AnnotationPayload]): + Prediction result. + Translation and Text Sentiment will return + precisely one payload. + preprocessed_input (~.data_items.ExamplePayload): + The preprocessed example that AutoML actually makes + prediction on. Empty if AutoML does not preprocess the input + example. + + - For Text Extraction: If the input is a .pdf file, the + OCR'ed text will be provided in + [document_text][google.cloud.automl.v1beta1.Document.document_text]. + metadata (Sequence[~.prediction_service.PredictResponse.MetadataEntry]): + Additional domain-specific prediction response metadata. + + - For Image Object Detection: ``max_bounding_box_count`` - + (int64) At most that many bounding boxes per image could + have been returned. + + - For Text Sentiment: ``sentiment_score`` - (float, + deprecated) A value between -1 and 1, -1 maps to least + positive sentiment, while 1 maps to the most positive one + and the higher the score, the more positive the sentiment + in the document is. Yet these values are relative to the + training data, so e.g. if all data was positive then -1 + will be also positive (though the least). 
The + sentiment_score shouldn't be confused with "score" or + "magnitude" from the previous Natural Language Sentiment + Analysis API. + """ + + payload = proto.RepeatedField( + proto.MESSAGE, number=1, message=annotation_payload.AnnotationPayload, + ) + + preprocessed_input = proto.Field( + proto.MESSAGE, number=3, message=data_items.ExamplePayload, + ) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=2) + + +class BatchPredictRequest(proto.Message): + r"""Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + Attributes: + name (str): + Required. Name of the model requested to + serve the batch prediction. + input_config (~.io.BatchPredictInputConfig): + Required. The input configuration for batch + prediction. + output_config (~.io.BatchPredictOutputConfig): + Required. The Configuration specifying where + output predictions should be written. + params (Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]): + Required. Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters long. + + - For Text Classification: + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for a text snippet, it + will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + - For Image Object Detection: + + ``score_threshold`` - (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. ``max_bounding_box_count`` - + (int64) No more than this number of bounding boxes will + be produced per image. Default is 100, the requested + value may be limited by server. + + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for a video, it will + only produce results that have at least this confidence + score. The default is 0.5. ``segment_classification`` - + (boolean) Set to true to request segment-level + classification. AutoML Video Intelligence returns labels + and their confidence scores for the entire segment of the + video that user specified in the request configuration. + The default is "true". ``shot_classification`` - + (boolean) Set to true to request shot-level + classification. AutoML Video Intelligence determines the + boundaries for each camera shot in the entire segment of + the video that user specified in the request + configuration. AutoML Video Intelligence then returns + labels and their confidence scores for each detected + shot, along with the start and end time of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. The default is "false". + ``1s_interval_classification`` - (boolean) Set to true to + request classification for a video at one-second + intervals. AutoML Video Intelligence returns labels and + their confidence scores for each second of the entire + segment of the video that user specified in the request + configuration. 
WARNING: Model evaluation is not done for
+               this classification type, the quality of it depends on
+               training data, but there are no metrics provided to
+               describe that quality. The default is "false".
+
+            -  For Tables:
+
+               feature_importance - (boolean) Whether feature importance
+               should be populated in the returned TablesAnnotations.
+               The default is false.
+
+            -  For Video Object Tracking:
+
+               ``score_threshold`` - (float) When Model detects objects
+               on video frames, it will only produce bounding boxes
+               which have at least this confidence score. Value in 0 to
+               1 range, default is 0.5. ``max_bounding_box_count`` -
+               (int64) No more than this number of bounding boxes will
+               be returned per frame. Default is 100, the requested
+               value may be limited by server. ``min_bounding_box_size``
+               - (float) Only bounding boxes whose shortest edge is at
+               least this long, as a relative value of the video frame
+               size, will be returned. Value in 0 to 1 range. Default
+               is 0.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    input_config = proto.Field(
+        proto.MESSAGE, number=3, message=io.BatchPredictInputConfig,
+    )
+
+    output_config = proto.Field(
+        proto.MESSAGE, number=4, message=io.BatchPredictOutputConfig,
+    )
+
+    params = proto.MapField(proto.STRING, proto.STRING, number=5)
+
+
+class BatchPredictResult(proto.Message):
+    r"""Result of the Batch Predict. This message is returned in
+    [response][google.longrunning.Operation.response] of the operation
+    returned by the
+    [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
+
+    Attributes:
+        metadata (Sequence[~.prediction_service.BatchPredictResult.MetadataEntry]):
+            Additional domain-specific prediction response metadata.
+
+            -  For Image Object Detection: ``max_bounding_box_count`` -
+               (int64) At most that many bounding boxes per image could
+               have been returned.
+
+            -  For Video Object Tracking: ``max_bounding_box_count`` -
+               (int64) At most that many bounding boxes per frame could
+               have been returned.
+    """
+
+    metadata = proto.MapField(proto.STRING, proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
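As an online counterpart to the batch request above, here is a sketch for a text classification model (placeholder IDs; ``score_threshold`` is the classification parameter documented in ``PredictRequest``):

```py
from google.cloud import automl_v1beta1 as automl

client = automl.PredictionServiceClient()

payload = automl.ExamplePayload(
    text_snippet=automl.TextSnippet(
        content="A sample sentence.", mime_type="text/plain"
    )
)
response = client.predict(
    name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
    payload=payload,
    params={"score_threshold": "0.8"},
)
for annotation in response.payload:
    print(annotation.display_name)
```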
+ """ + + start = proto.Field(proto.DOUBLE, number=1) + + end = proto.Field(proto.DOUBLE, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/regression.py b/google/cloud/automl_v1beta1/types/regression.py new file mode 100644 index 00000000..f952a396 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/regression.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"RegressionEvaluationMetrics",}, +) + + +class RegressionEvaluationMetrics(proto.Message): + r"""Metrics for regression problems. + + Attributes: + root_mean_squared_error (float): + Output only. Root Mean Squared Error (RMSE). + mean_absolute_error (float): + Output only. Mean Absolute Error (MAE). + mean_absolute_percentage_error (float): + Output only. Mean absolute percentage error. + Only set if all ground truth values are are + positive. + r_squared (float): + Output only. R squared. + root_mean_squared_log_error (float): + Output only. Root mean squared log error. + """ + + root_mean_squared_error = proto.Field(proto.FLOAT, number=1) + + mean_absolute_error = proto.Field(proto.FLOAT, number=2) + + mean_absolute_percentage_error = proto.Field(proto.FLOAT, number=3) + + r_squared = proto.Field(proto.FLOAT, number=4) + + root_mean_squared_log_error = proto.Field(proto.FLOAT, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/service.py b/google/cloud/automl_v1beta1/types/service.py new file mode 100644 index 00000000..cfe23a6f --- /dev/null +++ b/google/cloud/automl_v1beta1/types/service.py @@ -0,0 +1,704 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "CreateDatasetRequest", + "GetDatasetRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", + "UpdateDatasetRequest", + "DeleteDatasetRequest", + "ImportDataRequest", + "ExportDataRequest", + "GetAnnotationSpecRequest", + "GetTableSpecRequest", + "ListTableSpecsRequest", + "ListTableSpecsResponse", + "UpdateTableSpecRequest", + "GetColumnSpecRequest", + "ListColumnSpecsRequest", + "ListColumnSpecsResponse", + "UpdateColumnSpecRequest", + "CreateModelRequest", + "GetModelRequest", + "ListModelsRequest", + "ListModelsResponse", + "DeleteModelRequest", + "DeployModelRequest", + "UndeployModelRequest", + "ExportModelRequest", + "ExportEvaluatedExamplesRequest", + "GetModelEvaluationRequest", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the project to + create the dataset for. + dataset (~.gca_dataset.Dataset): + Required. The dataset to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + dataset = proto.Field(proto.MESSAGE, number=2, message=gca_dataset.Dataset,) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + retrieve. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Attributes: + parent (str): + Required. The resource name of the project + from which to list datasets. + filter (str): + An expression for filtering the results of the request. + + - ``dataset_metadata`` - for existence of the case (e.g. + image_classification_dataset_metadata:*). Some examples + of using the filter are: + + - ``translation_dataset_metadata:*`` --> The dataset has + translation_dataset_metadata. + page_size (int): + Requested page size. Server may return fewer + results than requested. If unspecified, server + will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return Typically obtained via + [ListDatasetsResponse.next_page_token][google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token] + of the previous + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets] + call. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListDatasetsResponse(proto.Message): + r"""Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Attributes: + datasets (Sequence[~.gca_dataset.Dataset]): + The datasets read. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListDatasetsRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + datasets = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_dataset.Dataset, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] + + Attributes: + dataset (~.gca_dataset.Dataset): + Required. The dataset which replaces the + resource on the server. + update_mask (~.gp_field_mask.FieldMask): + The update mask applies to the resource. + """ + + dataset = proto.Field(proto.MESSAGE, number=1, message=gca_dataset.Dataset,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + delete. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + + Attributes: + name (str): + Required. Dataset name. Dataset must already + exist. All imported annotations and examples + will be added. + input_config (~.io.InputConfig): + Required. The desired input location and its + domain specific semantics, if any. + """ + + name = proto.Field(proto.STRING, number=1) + + input_config = proto.Field(proto.MESSAGE, number=3, message=io.InputConfig,) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + + Attributes: + name (str): + Required. The resource name of the dataset. + output_config (~.io.OutputConfig): + Required. The desired output location. + """ + + name = proto.Field(proto.STRING, number=1) + + output_config = proto.Field(proto.MESSAGE, number=3, message=io.OutputConfig,) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The resource name of the annotation + spec to retrieve. + """ + + name = proto.Field(proto.STRING, number=1) + + +class GetTableSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + + Attributes: + name (str): + Required. The resource name of the table spec + to retrieve. + field_mask (~.gp_field_mask.FieldMask): + Mask specifying which fields to read. 
+ """ + + name = proto.Field(proto.STRING, number=1) + + field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + +class ListTableSpecsRequest(proto.Message): + r"""Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Attributes: + parent (str): + Required. The resource name of the dataset to + list table specs from. + field_mask (~.gp_field_mask.FieldMask): + Mask specifying which fields to read. + filter (str): + Filter expression, see go/filtering. + page_size (int): + Requested page size. The server can return + fewer results than requested. If unspecified, + the server will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained from the + [ListTableSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token] + field of the previous + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + filter = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListTableSpecsResponse(proto.Message): + r"""Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Attributes: + table_specs (Sequence[~.gca_table_spec.TableSpec]): + The table specs read. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListTableSpecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + table_specs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_table_spec.TableSpec, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateTableSpecRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + + Attributes: + table_spec (~.gca_table_spec.TableSpec): + Required. The table spec which replaces the + resource on the server. + update_mask (~.gp_field_mask.FieldMask): + The update mask applies to the resource. + """ + + table_spec = proto.Field(proto.MESSAGE, number=1, message=gca_table_spec.TableSpec,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + +class GetColumnSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. + + Attributes: + name (str): + Required. The resource name of the column + spec to retrieve. + field_mask (~.gp_field_mask.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field(proto.STRING, number=1) + + field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + +class ListColumnSpecsRequest(proto.Message): + r"""Request message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Attributes: + parent (str): + Required. The resource name of the table spec + to list column specs from. + field_mask (~.gp_field_mask.FieldMask): + Mask specifying which fields to read. + filter (str): + Filter expression, see go/filtering. + page_size (int): + Requested page size. The server can return + fewer results than requested. 
If unspecified, + the server will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained from the + [ListColumnSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token] + field of the previous + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + filter = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListColumnSpecsResponse(proto.Message): + r"""Response message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Attributes: + column_specs (Sequence[~.gca_column_spec.ColumnSpec]): + The column specs read. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListColumnSpecsRequest.page_token][google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + column_specs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_column_spec.ColumnSpec, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateColumnSpecRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] + + Attributes: + column_spec (~.gca_column_spec.ColumnSpec): + Required. The column spec which replaces the + resource on the server. + update_mask (~.gp_field_mask.FieldMask): + The update mask applies to the resource. + """ + + column_spec = proto.Field( + proto.MESSAGE, number=1, message=gca_column_spec.ColumnSpec, + ) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + +class CreateModelRequest(proto.Message): + r"""Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + + Attributes: + parent (str): + Required. Resource name of the parent project + where the model is being created. + model (~.gca_model.Model): + Required. The model to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + model = proto.Field(proto.MESSAGE, number=4, message=gca_model.Model,) + + +class GetModelRequest(proto.Message): + r"""Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + + Attributes: + name (str): + Required. Resource name of the model. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Attributes: + parent (str): + Required. Resource name of the project, from + which to list the models. + filter (str): + An expression for filtering the results of the request. + + - ``model_metadata`` - for existence of the case (e.g. + video_classification_model_metadata:*). + + - ``dataset_id`` - for = or !=. Some examples of using the + filter are: + + - ``image_classification_model_metadata:*`` --> The model + has image_classification_model_metadata. + + - ``dataset_id=5`` --> The model was created from a dataset + with ID 5. + page_size (int): + Requested page size. 
+ page_token (str): + A token identifying a page of results for the server to + return Typically obtained via + [ListModelsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelsResponse.next_page_token] + of the previous + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels] + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListModelsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Attributes: + model (Sequence[~.gca_model.Model]): + List of models in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelsRequest.page_token][google.cloud.automl.v1beta1.ListModelsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_model.Model,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteModelRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + + Attributes: + name (str): + Required. Resource name of the model being + deleted. + """ + + name = proto.Field(proto.STRING, number=1) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. + + Attributes: + image_object_detection_model_deployment_metadata (~.image.ImageObjectDetectionModelDeploymentMetadata): + Model deployment metadata specific to Image + Object Detection. + image_classification_model_deployment_metadata (~.image.ImageClassificationModelDeploymentMetadata): + Model deployment metadata specific to Image + Classification. + name (str): + Required. Resource name of the model to + deploy. + """ + + image_object_detection_model_deployment_metadata = proto.Field( + proto.MESSAGE, + number=2, + oneof="model_deployment_metadata", + message=image.ImageObjectDetectionModelDeploymentMetadata, + ) + + image_classification_model_deployment_metadata = proto.Field( + proto.MESSAGE, + number=4, + oneof="model_deployment_metadata", + message=image.ImageClassificationModelDeploymentMetadata, + ) + + name = proto.Field(proto.STRING, number=1) + + +class UndeployModelRequest(proto.Message): + r"""Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + + Attributes: + name (str): + Required. Resource name of the model to + undeploy. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an error code + will be returned. + + Attributes: + name (str): + Required. The resource name of the model to + export. + output_config (~.io.ModelExportOutputConfig): + Required. The desired output location and + configuration. + """ + + name = proto.Field(proto.STRING, number=1) + + output_config = proto.Field( + proto.MESSAGE, number=3, message=io.ModelExportOutputConfig, + ) + + +class ExportEvaluatedExamplesRequest(proto.Message): + r"""Request message for + [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. + + Attributes: + name (str): + Required. 
The resource name of the model + whose evaluated examples are to be exported. + output_config (~.io.ExportEvaluatedExamplesOutputConfig): + Required. The desired output location and + configuration. + """ + + name = proto.Field(proto.STRING, number=1) + + output_config = proto.Field( + proto.MESSAGE, number=3, message=io.ExportEvaluatedExamplesOutputConfig, + ) + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + + Attributes: + name (str): + Required. Resource name for the model + evaluation. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Attributes: + parent (str): + Required. Resource name of the model to list + the model evaluations for. If modelId is set as + "-", this will list model evaluations from + across all models of the parent location. + filter (str): + An expression for filtering the results of the request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was + done for annotation spec with ID different than 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation was + done for aggregate of all annotation specs. + page_size (int): + Requested page size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token] + of the previous + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=6) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Attributes: + model_evaluation (Sequence[~.gca_model_evaluation.ModelEvaluation]): + List of model evaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to the + [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token] + field of a new + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] + request to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluation = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_model_evaluation.ModelEvaluation, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/table_spec.py b/google/cloud/automl_v1beta1/types/table_spec.py new file mode 100644 index 00000000..8a9b7ce5 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/table_spec.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import io + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"TableSpec",}, +) + + +class TableSpec(proto.Message): + r"""A specification of a relational table. The table's schema is + represented via its child column specs. It is pre-populated as part + of ImportData by schema inference algorithm, the version of which is + a required parameter of ImportData InputConfig. Note: While working + with a table, at times the schema may be inconsistent with the data + in the table (e.g. string in a FLOAT64 column). The consistency + validation is done upon creation of a model. Used by: + + - Tables + + Attributes: + name (str): + Output only. The resource name of the table spec. Form: + + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}`` + time_column_spec_id (str): + column_spec_id of the time column. Only used if the parent + dataset's ml_use_column_spec_id is not set. Used to split + rows into TRAIN, VALIDATE and TEST sets such that oldest + rows go to TRAIN set, newest to TEST, and those in between + to VALIDATE. Required type: TIMESTAMP. If both this column + and ml_use_column are not set, then ML use of all rows will + be assigned by AutoML. NOTE: Updates of this field will + instantly affect any other users concurrently working with + the dataset. + row_count (int): + Output only. The number of rows (i.e. + examples) in the table. + valid_row_count (int): + Output only. The number of valid rows (i.e. + without values that don't match DataType-s of + their columns). + column_count (int): + Output only. The number of columns of the + table. That is, the number of child + ColumnSpec-s. + input_configs (Sequence[~.io.InputConfig]): + Output only. Input configs via which data + currently residing in the table had been + imported. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + """ + + name = proto.Field(proto.STRING, number=1) + + time_column_spec_id = proto.Field(proto.STRING, number=2) + + row_count = proto.Field(proto.INT64, number=3) + + valid_row_count = proto.Field(proto.INT64, number=4) + + column_count = proto.Field(proto.INT64, number=7) + + input_configs = proto.RepeatedField( + proto.MESSAGE, number=5, message=io.InputConfig, + ) + + etag = proto.Field(proto.STRING, number=6) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/tables.py b/google/cloud/automl_v1beta1/types/tables.py new file mode 100644 index 00000000..affe5e37 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/tables.py @@ -0,0 +1,369 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1beta1.types import column_spec
+from google.cloud.automl_v1beta1.types import data_stats
+from google.cloud.automl_v1beta1.types import ranges
+from google.protobuf import struct_pb2 as struct  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1beta1",
+    manifest={
+        "TablesDatasetMetadata",
+        "TablesModelMetadata",
+        "TablesAnnotation",
+        "TablesModelColumnInfo",
+    },
+)
+
+
+class TablesDatasetMetadata(proto.Message):
+    r"""Metadata for a dataset used for AutoML Tables.
+
+    Attributes:
+        primary_table_spec_id (str):
+            Output only. The table_spec_id of the primary table of this
+            dataset.
+        target_column_spec_id (str):
+            column_spec_id of the primary table's column that should be
+            used as the training & prediction target. This column must
+            be non-nullable and have one of the following data types
+            (otherwise model creation will error):
+
+            -  CATEGORY
+
+            -  FLOAT64
+
+            If the type is CATEGORY, only up to 100 unique values may
+            exist in that column across all rows.
+
+            NOTE: Updates of this field will instantly affect any other
+            users concurrently working with the dataset.
+        weight_column_spec_id (str):
+            column_spec_id of the primary table's column that should be
+            used as the weight column, i.e. the higher the value the
+            more important the row will be during model training.
+            Required type: FLOAT64. Allowed values: 0 to 10000,
+            inclusive on both ends; 0 means the row is ignored for
+            training. If not set, all rows are assumed to have equal
+            weight of 1. NOTE: Updates of this field will instantly
+            affect any other users concurrently working with the
+            dataset.
+        ml_use_column_spec_id (str):
+            column_spec_id of the primary table column which specifies a
+            possible ML use of the row, i.e. the column will be used to
+            split the rows into TRAIN, VALIDATE and TEST sets. Required
+            type: STRING. This column, if set, must either have all of
+            ``TRAIN``, ``VALIDATE``, ``TEST`` among its values, or only
+            have ``TEST``, ``UNASSIGNED`` values. In the latter case the
+            rows with ``UNASSIGNED`` value will be assigned by AutoML.
+            Note that if a given ml use distribution makes it impossible
+            to create a "good" model, that call will error describing
+            the issue. If both this column_spec_id and primary table's
+            time_column_spec_id are not set, then all rows are treated
+            as ``UNASSIGNED``. NOTE: Updates of this field will
+            instantly affect any other users concurrently working with
+            the dataset.
+        target_column_correlations (Sequence[~.tables.TablesDatasetMetadata.TargetColumnCorrelationsEntry]):
+            Output only. Correlations between
+
+            [TablesDatasetMetadata.target_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id]
+            and other columns of the
+
+            [TablesDatasetMetadata.primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id].
+            Only set if the target column is set. Mapping from other
+            column spec id to its CorrelationStats with the target
+            column. This field may be stale, see the stats_update_time
+            field for the timestamp at which these stats were last
+            updated.
+        stats_update_time (~.timestamp.Timestamp):
+            Output only. The most recent timestamp when
+            target_column_correlations field and all descendant
+            ColumnSpec.data_stats and ColumnSpec.top_correlated_columns
+            fields were last (re-)generated. Any changes that happened
+            to the dataset afterwards are not reflected in these fields'
+            values. The regeneration happens in the background on a
+            best-effort basis.
+    """
+
+    primary_table_spec_id = proto.Field(proto.STRING, number=1)
+
+    target_column_spec_id = proto.Field(proto.STRING, number=2)
+
+    weight_column_spec_id = proto.Field(proto.STRING, number=3)
+
+    ml_use_column_spec_id = proto.Field(proto.STRING, number=4)
+
+    target_column_correlations = proto.MapField(
+        proto.STRING, proto.MESSAGE, number=6, message=data_stats.CorrelationStats,
+    )
+
+    stats_update_time = proto.Field(
+        proto.MESSAGE, number=7, message=timestamp.Timestamp,
+    )
+
+
+class TablesModelMetadata(proto.Message):
+    r"""Model metadata specific to AutoML Tables.
+
+    Attributes:
+        optimization_objective_recall_value (float):
+            Required when optimization_objective is
+            "MAXIMIZE_PRECISION_AT_RECALL". Must be between 0 and 1,
+            inclusive.
+        optimization_objective_precision_value (float):
+            Required when optimization_objective is
+            "MAXIMIZE_RECALL_AT_PRECISION". Must be between 0 and 1,
+            inclusive.
+        target_column_spec (~.column_spec.ColumnSpec):
+            Column spec of the dataset's primary table's column the
+            model is predicting. Snapshotted when model creation
+            started. Only 3 fields are used: name - May be set on
+            CreateModel; if it's not, then the ColumnSpec corresponding
+            to the current target_column_spec_id of the dataset the
+            model is trained from is used. If neither is set,
+            CreateModel will error. display_name - Output only.
+            data_type - Output only.
+        input_feature_column_specs (Sequence[~.column_spec.ColumnSpec]):
+            Column specs of the dataset's primary table's columns, on
+            which the model is trained and which are used as the input
+            for predictions. The
+
+            [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
+            as well as, according to dataset's state upon model
+            creation,
+
+            [weight_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spec_id],
+            and
+
+            [ml_use_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.ml_use_column_spec_id]
+            must never be included here.
+
+            Only 3 fields are used:
+
+            -  name - May be set on CreateModel; if set, only the
+               columns specified are used, otherwise all primary table's
+               columns (except the ones listed above) are used for the
+               training and prediction input.
+
+            -  display_name - Output only.
+
+            -  data_type - Output only.
+        optimization_objective (str):
+            Objective function the model is optimizing towards. The
+            training process creates a model that maximizes/minimizes
+            the value of the objective function over the validation set.
+
+            The supported optimization objectives depend on the
+            prediction type. If the field is not set, a default
+            objective function is used.
+
+            CLASSIFICATION_BINARY: "MAXIMIZE_AU_ROC" (default) -
+            Maximize the area under the receiver operating
+            characteristic (ROC) curve. "MINIMIZE_LOG_LOSS" - Minimize
+            log loss. "MAXIMIZE_AU_PRC" - Maximize the area under the
+            precision-recall curve. "MAXIMIZE_PRECISION_AT_RECALL" -
+            Maximize precision for a specified recall value.
+            "MAXIMIZE_RECALL_AT_PRECISION" - Maximize recall for a
+            specified precision value.
+
+            CLASSIFICATION_MULTI_CLASS: "MINIMIZE_LOG_LOSS" (default) -
+            Minimize log loss.
+
+            REGRESSION: "MINIMIZE_RMSE" (default) - Minimize
+            root-mean-squared error (RMSE). "MINIMIZE_MAE" - Minimize
+            mean-absolute error (MAE). "MINIMIZE_RMSLE" - Minimize
+            root-mean-squared log error (RMSLE).
+        tables_model_column_info (Sequence[~.tables.TablesModelColumnInfo]):
+            Output only. Auxiliary information for each of the
+            input_feature_column_specs with respect to this particular
+            model.
+        train_budget_milli_node_hours (int):
+            Required. The train budget of creating this
+            model, expressed in milli node hours, i.e. a
+            value of 1,000 in this field means 1 node hour.
+            The training cost of the model will not exceed
+            this budget. The final cost is intended to be
+            close to the budget, though it may end up being
+            (even) noticeably smaller, at the backend's
+            discretion. This especially may happen when
+            further model training ceases to provide any
+            improvements.
+            If the budget is set to a value known to be
+            insufficient to train a model for the given
+            dataset, the training won't be attempted and
+            will error.
+
+            The train budget must be between 1,000 and
+            72,000 milli node hours, inclusive.
+        train_cost_milli_node_hours (int):
+            Output only. The actual training cost of the
+            model, expressed in milli node hours, i.e. a
+            value of 1,000 in this field means 1 node hour.
+            Guaranteed to not exceed the train budget.
+        disable_early_stopping (bool):
+            Use the entire training budget. This disables
+            the early stopping feature. By default, the
+            early stopping feature is enabled, which means
+            that AutoML Tables might stop training before
+            the entire training budget has been used.
+    """
+
+    optimization_objective_recall_value = proto.Field(
+        proto.FLOAT, number=17, oneof="additional_optimization_objective_config"
+    )
+
+    optimization_objective_precision_value = proto.Field(
+        proto.FLOAT, number=18, oneof="additional_optimization_objective_config"
+    )
+
+    target_column_spec = proto.Field(
+        proto.MESSAGE, number=2, message=column_spec.ColumnSpec,
+    )
+
+    input_feature_column_specs = proto.RepeatedField(
+        proto.MESSAGE, number=3, message=column_spec.ColumnSpec,
+    )
+
+    optimization_objective = proto.Field(proto.STRING, number=4)
+
+    tables_model_column_info = proto.RepeatedField(
+        proto.MESSAGE, number=5, message="TablesModelColumnInfo",
+    )
+
+    train_budget_milli_node_hours = proto.Field(proto.INT64, number=6)
+
+    train_cost_milli_node_hours = proto.Field(proto.INT64, number=7)
+
+    disable_early_stopping = proto.Field(proto.BOOL, number=12)
+
+
+class TablesAnnotation(proto.Message):
+    r"""Contains annotation details specific to Tables.
+
+    Attributes:
+        score (float):
+            Output only. A confidence estimate between 0.0 and 1.0,
+            inclusive. A higher value means greater confidence in the
+            returned value. For
+
+            [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
+            of FLOAT64 data type the score is not populated.
+        prediction_interval (~.ranges.DoubleRange):
+            Output only. Only populated when
+
+            [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
+            has FLOAT64 data type. An interval in which the exactly
+            correct target value has a 95% chance of falling.
+        value (~.struct.Value):
+            The predicted value of the row's
+
+            [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec].
+            The value depends on the column's DataType:
+
+            -  CATEGORY - the predicted (with the above confidence
+               ``score``) CATEGORY value.
+
+            -  FLOAT64 - the predicted (with the above
+               ``prediction_interval``) FLOAT64 value.
+        tables_model_column_info (Sequence[~.tables.TablesModelColumnInfo]):
+            Output only. Auxiliary information for each of the model's
+
+            [input_feature_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
+            with respect to this particular prediction. If no other
+            fields than
+
+            [column_spec_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_spec_name]
+            and
+
+            [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name]
+            would be populated, then this whole field is not.
+        baseline_score (float):
+            Output only. Stores the prediction score for
+            the baseline example, which is defined as the
+            example with all values set to their baseline
+            values. This is used as part of the Sampled
+            Shapley explanation of the model's prediction.
+            This field is populated only when feature
+            importance is requested. For regression models,
+            this holds the baseline prediction for the
+            baseline example. For classification models,
+            this holds the baseline prediction for the
+            baseline example for the argmax class.
+    """
+
+    score = proto.Field(proto.FLOAT, number=1)
+
+    prediction_interval = proto.Field(
+        proto.MESSAGE, number=4, message=ranges.DoubleRange,
+    )
+
+    value = proto.Field(proto.MESSAGE, number=2, message=struct.Value,)
+
+    tables_model_column_info = proto.RepeatedField(
+        proto.MESSAGE, number=3, message="TablesModelColumnInfo",
+    )
+
+    baseline_score = proto.Field(proto.FLOAT, number=5)
+
+
+class TablesModelColumnInfo(proto.Message):
+    r"""Information specific to a given column and Tables model, in
+    the context of the model and the predictions created by it.
+
+    Attributes:
+        column_spec_name (str):
+            Output only. The name of the ColumnSpec
+            describing the column. Not populated when this
+            proto is output to BigQuery.
+        column_display_name (str):
+            Output only. The display name of the column (same as the
+            display_name of its ColumnSpec).
+        feature_importance (float):
+            Output only. When given as part of a Model (always
+            populated): measurement of how much the correctness of the
+            model's predictions on the TEST data depends on values in
+            this column. A value between 0 and 1, higher means higher
+            influence. These values are normalized: for all input
+            feature columns of a given model they add up to 1.
+
+            When given back by Predict (populated iff
+            [feature_importance
+            param][google.cloud.automl.v1beta1.PredictRequest.params] is
+            set) or Batch Predict (populated iff
+            [feature_importance][google.cloud.automl.v1beta1.PredictRequest.params]
+            param is set): measurement of how impactful the value in
+            this column was for the prediction returned for the given
+            row. Specifically, the feature importance specifies
+            the marginal contribution that the feature made to the
+            prediction score compared to the baseline score. These
+            values are computed using the Sampled Shapley method.
+ """ + + column_spec_name = proto.Field(proto.STRING, number=1) + + column_display_name = proto.Field(proto.STRING, number=2) + + feature_importance = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/temporal.py b/google/cloud/automl_v1beta1/types/temporal.py new file mode 100644 index 00000000..442ff4b5 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/temporal.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", manifest={"TimeSegment",}, +) + + +class TimeSegment(proto.Message): + r"""A time period inside of an example that has a time dimension + (e.g. video). + + Attributes: + start_time_offset (~.duration.Duration): + Start of the time segment (inclusive), + represented as the duration since the example + start. + end_time_offset (~.duration.Duration): + End of the time segment (exclusive), + represented as the duration since the example + start. + """ + + start_time_offset = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + end_time_offset = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/text.py b/google/cloud/automl_v1beta1/types/text.py new file mode 100644 index 00000000..8ecd3869 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/text.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import classification + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionDatasetMetadata", + "TextExtractionModelMetadata", + "TextSentimentDatasetMetadata", + "TextSentimentModelMetadata", + }, +) + + +class TextClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata for classification. + + Attributes: + classification_type (~.classification.ClassificationType): + Required. Type of the classification problem. 
+ """ + + classification_type = proto.Field( + proto.ENUM, number=1, enum=classification.ClassificationType, + ) + + +class TextClassificationModelMetadata(proto.Message): + r"""Model metadata that is specific to text classification. + + Attributes: + classification_type (~.classification.ClassificationType): + Output only. Classification type of the + dataset used to train this model. + """ + + classification_type = proto.Field( + proto.ENUM, number=3, enum=classification.ClassificationType, + ) + + +class TextExtractionDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to text extraction""" + + +class TextExtractionModelMetadata(proto.Message): + r"""Model metadata that is specific to text extraction.""" + + +class TextSentimentDatasetMetadata(proto.Message): + r"""Dataset metadata for text sentiment. + + Attributes: + sentiment_max (int): + Required. A sentiment is expressed as an integer ordinal, + where higher value means a more positive sentiment. The + range of sentiments that will be used is between 0 and + sentiment_max (inclusive on both ends), and all the values + in the range must be represented in the dataset before a + model can be created. sentiment_max value must be between 1 + and 10 (inclusive). + """ + + sentiment_max = proto.Field(proto.INT32, number=1) + + +class TextSentimentModelMetadata(proto.Message): + r"""Model metadata that is specific to text sentiment.""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/text_extraction.py b/google/cloud/automl_v1beta1/types/text_extraction.py new file mode 100644 index 00000000..4193fc0e --- /dev/null +++ b/google/cloud/automl_v1beta1/types/text_extraction.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={"TextExtractionAnnotation", "TextExtractionEvaluationMetrics",}, +) + + +class TextExtractionAnnotation(proto.Message): + r"""Annotation for identifying spans of text. + + Attributes: + text_segment (~.gca_text_segment.TextSegment): + An entity annotation will set this, which is + the part of the original text to which the + annotation pertains. + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence in correctness of the annotation. + """ + + text_segment = proto.Field( + proto.MESSAGE, + number=3, + oneof="annotation", + message=gca_text_segment.TextSegment, + ) + + score = proto.Field(proto.FLOAT, number=1) + + +class TextExtractionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for text extraction problems. + + Attributes: + au_prc (float): + Output only. The Area under precision recall + curve metric. 
+        confidence_metrics_entries (Sequence[~.text_extraction.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]):
+            Output only. Metrics that have confidence
+            thresholds. A precision-recall curve can be
+            derived from them.
+    """
+
+    class ConfidenceMetricsEntry(proto.Message):
+        r"""Metrics for a single confidence threshold.
+
+        Attributes:
+            confidence_threshold (float):
+                Output only. The confidence threshold value
+                used to compute the metrics. Only annotations
+                with a score of at least this threshold are
+                considered to be ones the model would return.
+            recall (float):
+                Output only. Recall under the given
+                confidence threshold.
+            precision (float):
+                Output only. Precision under the given
+                confidence threshold.
+            f1_score (float):
+                Output only. The harmonic mean of recall and
+                precision.
+        """
+
+        confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+        recall = proto.Field(proto.FLOAT, number=3)
+
+        precision = proto.Field(proto.FLOAT, number=4)
+
+        f1_score = proto.Field(proto.FLOAT, number=5)
+
+    au_prc = proto.Field(proto.FLOAT, number=1)
+
+    confidence_metrics_entries = proto.RepeatedField(
+        proto.MESSAGE, number=2, message=ConfidenceMetricsEntry,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/text_segment.py b/google/cloud/automl_v1beta1/types/text_segment.py
new file mode 100644
index 00000000..646bb9f9
--- /dev/null
+++ b/google/cloud/automl_v1beta1/types/text_segment.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1beta1", manifest={"TextSegment",},
+)
+
+
+class TextSegment(proto.Message):
+    r"""A contiguous part of a text (string), assuming it has a
+    UTF-8 NFC encoding.
+
+    Attributes:
+        content (str):
+            Output only. The content of the TextSegment.
+        start_offset (int):
+            Required. Zero-based character index of the
+            first character of the text segment (counting
+            characters from the beginning of the text).
+        end_offset (int):
+            Required. Zero-based character index of the first character
+            past the end of the text segment (counting characters from
+            the beginning of the text). The character at the end_offset
+            is NOT included in the text segment.
+    """
+
+    content = proto.Field(proto.STRING, number=3)
+
+    start_offset = proto.Field(proto.INT64, number=1)
+
+    end_offset = proto.Field(proto.INT64, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/text_sentiment.py b/google/cloud/automl_v1beta1/types/text_sentiment.py
new file mode 100644
index 00000000..da055e23
--- /dev/null
+++ b/google/cloud/automl_v1beta1/types/text_sentiment.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.automl_v1beta1.types import classification
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1beta1",
+    manifest={"TextSentimentAnnotation", "TextSentimentEvaluationMetrics",},
+)
+
+
+class TextSentimentAnnotation(proto.Message):
+    r"""Contains annotation details specific to text sentiment.
+
+    Attributes:
+        sentiment (int):
+            Output only. The sentiment with the semantic, as given to
+            the
+            [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]
+            when populating the dataset from which the model used for
+            the prediction had been trained. The sentiment values are
+            between 0 and
+            Dataset.text_sentiment_dataset_metadata.sentiment_max
+            (inclusive), with higher value meaning more positive
+            sentiment. They are completely relative, i.e. 0 means least
+            positive sentiment and sentiment_max means the most positive
+            from the sentiments present in the train data. Therefore
+            e.g. if the train data had only negative sentiment, then
+            sentiment_max would still be negative (although least
+            negative). The sentiment shouldn't be confused with "score"
+            or "magnitude" from the previous Natural Language Sentiment
+            Analysis API.
+    """
+
+    sentiment = proto.Field(proto.INT32, number=1)
+
+
+class TextSentimentEvaluationMetrics(proto.Message):
+    r"""Model evaluation metrics for text sentiment problems.
+
+    Attributes:
+        precision (float):
+            Output only. Precision.
+        recall (float):
+            Output only. Recall.
+        f1_score (float):
+            Output only. The harmonic mean of recall and
+            precision.
+        mean_absolute_error (float):
+            Output only. Mean absolute error. Only set
+            for the overall model evaluation, not for
+            evaluation of a single annotation spec.
+        mean_squared_error (float):
+            Output only. Mean squared error. Only set for
+            the overall model evaluation, not for evaluation
+            of a single annotation spec.
+        linear_kappa (float):
+            Output only. Linear weighted kappa. Only set
+            for the overall model evaluation, not for
+            evaluation of a single annotation spec.
+        quadratic_kappa (float):
+            Output only. Quadratic weighted kappa. Only
+            set for the overall model evaluation, not for
+            evaluation of a single annotation spec.
+        confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix):
+            Output only. Confusion matrix of the
+            evaluation. Only set for the overall model
+            evaluation, not for evaluation of a single
+            annotation spec.
+        annotation_spec_id (Sequence[str]):
+            Output only. The annotation spec ids used for
+            this evaluation. Deprecated.
+ """ + + precision = proto.Field(proto.FLOAT, number=1) + + recall = proto.Field(proto.FLOAT, number=2) + + f1_score = proto.Field(proto.FLOAT, number=3) + + mean_absolute_error = proto.Field(proto.FLOAT, number=4) + + mean_squared_error = proto.Field(proto.FLOAT, number=5) + + linear_kappa = proto.Field(proto.FLOAT, number=6) + + quadratic_kappa = proto.Field(proto.FLOAT, number=7) + + confusion_matrix = proto.Field( + proto.MESSAGE, + number=8, + message=classification.ClassificationEvaluationMetrics.ConfusionMatrix, + ) + + annotation_spec_id = proto.RepeatedField(proto.STRING, number=9) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/automl_v1beta1/types/translation.py b/google/cloud/automl_v1beta1/types/translation.py new file mode 100644 index 00000000..9c7491e0 --- /dev/null +++ b/google/cloud/automl_v1beta1/types/translation.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.automl_v1beta1.types import data_items + + +__protobuf__ = proto.module( + package="google.cloud.automl.v1beta1", + manifest={ + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", + "TranslationAnnotation", + }, +) + + +class TranslationDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to translation. + + Attributes: + source_language_code (str): + Required. The BCP-47 language code of the + source language. + target_language_code (str): + Required. The BCP-47 language code of the + target language. + """ + + source_language_code = proto.Field(proto.STRING, number=1) + + target_language_code = proto.Field(proto.STRING, number=2) + + +class TranslationEvaluationMetrics(proto.Message): + r"""Evaluation metrics for the dataset. + + Attributes: + bleu_score (float): + Output only. BLEU score. + base_bleu_score (float): + Output only. BLEU score for base model. + """ + + bleu_score = proto.Field(proto.DOUBLE, number=1) + + base_bleu_score = proto.Field(proto.DOUBLE, number=2) + + +class TranslationModelMetadata(proto.Message): + r"""Model metadata that is specific to translation. + + Attributes: + base_model (str): + The resource name of the model to use as a baseline to train + the custom model. If unset, we use the default base model + provided by Google Translate. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + source_language_code (str): + Output only. Inferred from the dataset. + The source languge (The BCP-47 language code) + that is used for training. + target_language_code (str): + Output only. The target languge (The BCP-47 + language code) that is used for training. + """ + + base_model = proto.Field(proto.STRING, number=1) + + source_language_code = proto.Field(proto.STRING, number=2) + + target_language_code = proto.Field(proto.STRING, number=3) + + +class TranslationAnnotation(proto.Message): + r"""Annotation details specific to translation. 
+
+    Attributes:
+        translated_content (~.data_items.TextSnippet):
+            Output only. The translated content.
+    """
+
+    translated_content = proto.Field(
+        proto.MESSAGE, number=1, message=data_items.TextSnippet,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/video.py b/google/cloud/automl_v1beta1/types/video.py
new file mode 100644
index 00000000..685393b9
--- /dev/null
+++ b/google/cloud/automl_v1beta1/types/video.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.automl.v1beta1",
+    manifest={
+        "VideoClassificationDatasetMetadata",
+        "VideoObjectTrackingDatasetMetadata",
+        "VideoClassificationModelMetadata",
+        "VideoObjectTrackingModelMetadata",
+    },
+)
+
+
+class VideoClassificationDatasetMetadata(proto.Message):
+    r"""Dataset metadata specific to video classification.
+    All Video Classification datasets are treated as multi-label.
+    """
+
+
+class VideoObjectTrackingDatasetMetadata(proto.Message):
+    r"""Dataset metadata specific to video object tracking."""
+
+
+class VideoClassificationModelMetadata(proto.Message):
+    r"""Model metadata specific to video classification."""
+
+
+class VideoObjectTrackingModelMetadata(proto.Message):
+    r"""Model metadata specific to video object tracking."""
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 00000000..4505b485
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
diff --git a/noxfile.py b/noxfile.py
index 9a2db338..9c69b3b7 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -27,8 +27,8 @@
 BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
 DEFAULT_PYTHON_VERSION = "3.8"
-SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"]
+SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
@@ -70,8 +70,11 @@ def lint_setup_py(session):
 def default(session):
     # Install all test dependencies, then install this package in-place.
+    session.install("asyncmock", "pytest-asyncio")
+
     session.install("mock", "pytest", "pytest-cov")
     session.install("-e", ".[pandas,storage]")
+    session.install("proto-plus==1.8.1")
 
     # Run py.test against the unit tests.
     session.run(
diff --git a/samples/beta/batch_predict.py b/samples/beta/batch_predict.py
index 8dd5acef..65315b40 100644
--- a/samples/beta/batch_predict.py
+++ b/samples/beta/batch_predict.py
@@ -27,21 +27,27 @@ def batch_predict(
     prediction_client = automl.PredictionServiceClient()
     # Get the full path of the model.
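Both spellings of the model path are in play across these samples: `model_path` is a plain helper on the 2.x generated clients that can be called on the class without constructing a client, and other updated samples assemble the same string by hand. A minimal sketch of the equivalence (the project and model IDs are placeholders):

```py
from google.cloud import automl

project_id, model_id = "my-project", "my-model-id"  # placeholder IDs

# Helper on the class; no client instance required.
model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)

# The same resource name, assembled by hand as other updated samples do.
assert model_full_id == (
    f"projects/{project_id}/locations/us-central1/models/{model_id}"
)
```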
- model_full_id = prediction_client.model_path( + model_full_id = automl.AutoMlClient.model_path( project_id, "us-central1", model_id ) - gcs_source = automl.types.GcsSource(input_uris=[input_uri]) + gcs_source = automl.GcsSource(input_uris=[input_uri]) - input_config = automl.types.BatchPredictInputConfig(gcs_source=gcs_source) - gcs_destination = automl.types.GcsDestination(output_uri_prefix=output_uri) - output_config = automl.types.BatchPredictOutputConfig( + input_config = automl.BatchPredictInputConfig(gcs_source=gcs_source) + gcs_destination = automl.GcsDestination(output_uri_prefix=output_uri) + output_config = automl.BatchPredictOutputConfig( gcs_destination=gcs_destination ) params = {} + request = automl.BatchPredictRequest( + name=model_full_id, + input_config=input_config, + output_config=output_config, + params=params + ) response = prediction_client.batch_predict( - model_full_id, input_config, output_config, params=params + request=request ) print("Waiting for operation to complete...") diff --git a/samples/beta/cancel_operation.py b/samples/beta/cancel_operation.py index a30fe2a4..5fa3e3c2 100644 --- a/samples/beta/cancel_operation.py +++ b/samples/beta/cancel_operation.py @@ -31,7 +31,7 @@ def sample_cancel_operation(project, operation_id): client = automl_v1beta1.AutoMlClient() - operations_client = client.transport._operations_client + operations_client = client._transport.operations_client # project = '[Google Cloud Project ID]' # operation_id = '[Operation ID]' diff --git a/samples/beta/delete_dataset.py b/samples/beta/delete_dataset.py index 51647758..341e2413 100644 --- a/samples/beta/delete_dataset.py +++ b/samples/beta/delete_dataset.py @@ -24,7 +24,7 @@ def delete_dataset(project_id="YOUR_PROJECT_ID", dataset_id="YOUR_DATASET_ID"): dataset_full_id = client.dataset_path( project_id, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) print("Dataset deleted. {}".format(response.result())) # [END automl_delete_dataset_beta] diff --git a/samples/beta/delete_dataset_test.py b/samples/beta/delete_dataset_test.py index 9781ad26..181db832 100644 --- a/samples/beta/delete_dataset_test.py +++ b/samples/beta/delete_dataset_test.py @@ -27,13 +27,13 @@ @pytest.fixture(scope="function") def dataset_id(): client = automl.AutoMlClient() - project_location = client.location_path(PROJECT_ID, "us-central1") + project_location = f"projects/{PROJECT_ID}/locations/us-central1" display_name = "test_{}".format(uuid.uuid4()).replace("-", "")[:32] - metadata = automl.types.TextExtractionDatasetMetadata() - dataset = automl.types.Dataset( + metadata = automl.TextExtractionDatasetMetadata() + dataset = automl.Dataset( display_name=display_name, text_extraction_dataset_metadata=metadata ) - response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) dataset_id = response.name.split("/")[-1] yield dataset_id diff --git a/samples/beta/delete_model.py b/samples/beta/delete_model.py index 030a2900..85ad913c 100644 --- a/samples/beta/delete_model.py +++ b/samples/beta/delete_model.py @@ -25,7 +25,7 @@ def delete_model(project_id, model_id): client = automl.AutoMlClient() # Get the full path of the model. model_full_id = client.model_path(project_id, "us-central1", model_id) - response = client.delete_model(model_full_id) + response = client.delete_model(name=model_full_id) print("Model deleted. 
{}".format(response.result())) # [END automl_delete_model_beta] diff --git a/samples/beta/get_model.py b/samples/beta/get_model.py index 834dac0c..9e8dcb9c 100644 --- a/samples/beta/get_model.py +++ b/samples/beta/get_model.py @@ -25,10 +25,10 @@ def get_model(project_id, model_id): client = automl.AutoMlClient() # Get the full path of the model. model_full_id = client.model_path(project_id, "us-central1", model_id) - model = client.get_model(model_full_id) + model = client.get_model(name=model_full_id) # Retrieve deployment state. - if model.deployment_state == automl.enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state == automl.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" @@ -37,8 +37,6 @@ def get_model(project_id, model_id): print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_get_model_beta] diff --git a/samples/beta/get_model_evaluation.py b/samples/beta/get_model_evaluation.py index ed540f2e..07ad3738 100644 --- a/samples/beta/get_model_evaluation.py +++ b/samples/beta/get_model_evaluation.py @@ -26,18 +26,15 @@ def get_model_evaluation(project_id, model_id, model_evaluation_id): client = automl.AutoMlClient() # Get the full path of the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, "us-central1", model_id, model_evaluation_id - ) + model_path = client.model_path(project_id, "us-central1", model_id) + model_evaluation_full_id = f"{model_path}/modelEvaluations/{model_evaluation_id}" # Get complete detail of the model evaluation. 
- response = client.get_model_evaluation(model_evaluation_full_id) + response = client.get_model_evaluation(name=model_evaluation_full_id) print("Model evaluation name: {}".format(response.name)) print("Model annotation spec id: {}".format(response.annotation_spec_id)) - print("Create Time:") - print("\tseconds: {}".format(response.create_time.seconds)) - print("\tnanos: {}".format(response.create_time.nanos / 1e9)) + print("Create Time: {}".format(response.create_time)) print( "Evaluation example count: {}".format(response.evaluated_example_count) ) diff --git a/samples/beta/get_model_evaluation_test.py b/samples/beta/get_model_evaluation_test.py index 5b2ecf36..bdf75e15 100644 --- a/samples/beta/get_model_evaluation_test.py +++ b/samples/beta/get_model_evaluation_test.py @@ -27,9 +27,12 @@ def model_evaluation_id(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - generator = client.list_model_evaluations(model_full_id, "").pages - page = next(generator) - evaluation = page.next() + request = automl.ListModelEvaluationsRequest( + parent=model_full_id, + filter="" + ) + evaluations = client.list_model_evaluations(request=request) + evaluation = next(iter(evaluations)) model_evaluation_id = evaluation.name.split( "{}/modelEvaluations/".format(MODEL_ID) )[1].split("\n")[0] diff --git a/samples/beta/get_operation_status.py b/samples/beta/get_operation_status.py index f376e246..612acbaa 100644 --- a/samples/beta/get_operation_status.py +++ b/samples/beta/get_operation_status.py @@ -24,7 +24,7 @@ def get_operation_status( client = automl.AutoMlClient() # Get the latest state of a long-running operation. - response = client.transport._operations_client.get_operation( + response = client._transport.operations_client.get_operation( operation_full_id ) diff --git a/samples/beta/get_operation_status_test.py b/samples/beta/get_operation_status_test.py index 7da9e7b3..b178e9a2 100644 --- a/samples/beta/get_operation_status_test.py +++ b/samples/beta/get_operation_status_test.py @@ -25,8 +25,8 @@ @pytest.fixture(scope="function") def operation_id(): client = automl.AutoMlClient() - project_location = client.location_path(PROJECT_ID, "us-central1") - generator = client.transport._operations_client.list_operations( + project_location = f"projects/{PROJECT_ID}/locations/us-central1" + generator = client._transport.operations_client.list_operations( project_location, filter_="" ).pages page = next(generator) diff --git a/samples/beta/import_dataset.py b/samples/beta/import_dataset.py index 97f1c0b8..d0910615 100644 --- a/samples/beta/import_dataset.py +++ b/samples/beta/import_dataset.py @@ -30,10 +30,10 @@ def import_dataset( ) # Get the multiple Google Cloud Storage URIs input_uris = path.split(",") - gcs_source = automl.types.GcsSource(input_uris=input_uris) - input_config = automl.types.InputConfig(gcs_source=gcs_source) + gcs_source = automl.GcsSource(input_uris=input_uris) + input_config = automl.InputConfig(gcs_source=gcs_source) # Import data from the input URI - response = client.import_data(dataset_full_id, input_config) + response = client.import_data(name=dataset_full_id, input_config=input_config) print("Processing import...") print("Data imported. 
{}".format(response.result())) diff --git a/samples/beta/list_datasets.py b/samples/beta/list_datasets.py index 5d5c83a3..ad3042b5 100644 --- a/samples/beta/list_datasets.py +++ b/samples/beta/list_datasets.py @@ -22,19 +22,18 @@ def list_datasets(project_id="YOUR_PROJECT_ID"): """List datasets.""" client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # List all the datasets available in the region. - response = client.list_datasets(project_location, "") + request = automl.ListDatasetsRequest(parent=project_location, filter="") + response = client.list_datasets(request=request) print("List of datasets:") for dataset in response: print("Dataset name: {}".format(dataset.name)) print("Dataset id: {}".format(dataset.name.split("/")[-1])) print("Dataset display name: {}".format(dataset.display_name)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_video_object_tracking_list_datasets_beta] print( diff --git a/samples/beta/list_models.py b/samples/beta/list_models.py index 7e9c7e34..684e2ec2 100644 --- a/samples/beta/list_models.py +++ b/samples/beta/list_models.py @@ -23,15 +23,16 @@ def list_models(project_id): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") - response = client.list_models(project_location, "") + project_location = f"projects/{project_id}/locations/us-central1" + request = automl.ListModelsRequest(parent=project_location, filter="") + response = client.list_models(request=request) print("List of models:") for model in response: # Display the model information. if ( model.deployment_state - == automl.enums.Model.DeploymentState.DEPLOYED + == automl.Model.DeploymentState.DEPLOYED ): deployment_state = "deployed" else: @@ -40,8 +41,6 @@ def list_models(project_id): print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_list_models_beta] diff --git a/samples/beta/set_endpoint.py b/samples/beta/set_endpoint.py index 436e427e..f335e35e 100644 --- a/samples/beta/set_endpoint.py +++ b/samples/beta/set_endpoint.py @@ -27,13 +27,18 @@ def set_endpoint(project_id): # A resource that represents Google Cloud Platform location. # project_id = 'YOUR_PROJECT_ID' - project_location = client.location_path(project_id, 'eu') + project_location = f"projects/{project_id}/locations/eu" # [END automl_set_endpoint] # List all the datasets available # Note: Create a dataset in `eu`, before calling `list_datasets`. 
+ request = automl.ListDatasetsRequest( + parent=project_location, + filter="" + ) response = client.list_datasets( - project_location, filter_='') + request=request + ) for dataset in response: print(dataset) diff --git a/samples/beta/video_classification_create_dataset.py b/samples/beta/video_classification_create_dataset.py index 086f98f0..7d756b09 100644 --- a/samples/beta/video_classification_create_dataset.py +++ b/samples/beta/video_classification_create_dataset.py @@ -25,15 +25,15 @@ def create_dataset( client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") - metadata = automl.types.VideoClassificationDatasetMetadata() - dataset = automl.types.Dataset( + project_location = f"projects/{project_id}/locations/us-central1" + metadata = automl.VideoClassificationDatasetMetadata() + dataset = automl.Dataset( display_name=display_name, video_classification_dataset_metadata=metadata, ) # Create a dataset with the dataset metadata in the region. - created_dataset = client.create_dataset(project_location, dataset) + created_dataset = client.create_dataset(parent=project_location, dataset=dataset) # Display the dataset information print("Dataset name: {}".format(created_dataset.name)) diff --git a/samples/beta/video_classification_create_dataset_test.py b/samples/beta/video_classification_create_dataset_test.py index 443f5042..9eb994d0 100644 --- a/samples/beta/video_classification_create_dataset_test.py +++ b/samples/beta/video_classification_create_dataset_test.py @@ -34,7 +34,7 @@ def teardown(): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", DATASET_ID ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/beta/video_classification_create_model.py b/samples/beta/video_classification_create_model.py index 5bf19b4e..58e0ab32 100644 --- a/samples/beta/video_classification_create_model.py +++ b/samples/beta/video_classification_create_model.py @@ -25,18 +25,19 @@ def create_model( client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google - metadata = automl.types.VideoClassificationModelMetadata() - model = automl.types.Model( + metadata = automl.VideoClassificationModelMetadata() + model = automl.Model( display_name=display_name, dataset_id=dataset_id, video_classification_model_metadata=metadata, ) # Create a model with the model metadata in the region. 
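`create_model` returns a long-running operation wrapper, which the updated sample below now also returns to its caller. Two common follow-ups, sketched under the assumption that `response` is that return value:

```py
# Option 1: block until training completes (video models can take hours).
model = response.result()
print("Trained model: {}".format(model.name))

# Option 2: record the operation name and poll or cancel later, the way
# the accompanying test teardown does.
operation_id = response.operation.name
client._transport.operations_client.cancel_operation(operation_id)
```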
- response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") # [END automl_video_classification_create_model_beta] + return response diff --git a/samples/beta/video_classification_create_model_test.py b/samples/beta/video_classification_create_model_test.py index 593166cb..f6af031b 100644 --- a/samples/beta/video_classification_create_model_test.py +++ b/samples/beta/video_classification_create_model_test.py @@ -31,7 +31,7 @@ def teardown(): # Cancel the training operation client = automl.AutoMlClient() - client.transport._operations_client.cancel_operation(OPERATION_ID) + client._transport.operations_client.cancel_operation(OPERATION_ID) def test_video_classification_create_model(capsys): @@ -39,6 +39,7 @@ def test_video_classification_create_model(capsys): video_classification_create_model.create_model( PROJECT_ID, DATASET_ID, model_name ) + out, _ = capsys.readouterr() assert "Training started" in out diff --git a/samples/beta/video_object_tracking_create_dataset.py b/samples/beta/video_object_tracking_create_dataset.py index 2a651d0d..c99d5c02 100644 --- a/samples/beta/video_object_tracking_create_dataset.py +++ b/samples/beta/video_object_tracking_create_dataset.py @@ -23,15 +23,15 @@ def create_dataset( client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") - metadata = automl.types.VideoObjectTrackingDatasetMetadata() - dataset = automl.types.Dataset( + project_location = f"projects/{project_id}/locations/us-central1" + metadata = automl.VideoObjectTrackingDatasetMetadata() + dataset = automl.Dataset( display_name=display_name, video_object_tracking_dataset_metadata=metadata, ) # Create a dataset with the dataset metadata in the region. - created_dataset = client.create_dataset(project_location, dataset) + created_dataset = client.create_dataset(parent=project_location, dataset=dataset) # Display the dataset information print("Dataset name: {}".format(created_dataset.name)) print("Dataset id: {}".format(created_dataset.name.split("/")[-1])) diff --git a/samples/beta/video_object_tracking_create_dataset_test.py b/samples/beta/video_object_tracking_create_dataset_test.py index 96957f71..1ef744cb 100644 --- a/samples/beta/video_object_tracking_create_dataset_test.py +++ b/samples/beta/video_object_tracking_create_dataset_test.py @@ -33,7 +33,7 @@ def teardown(): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", DATASET_ID ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/beta/video_object_tracking_create_model.py b/samples/beta/video_object_tracking_create_model.py index 5ff8be98..38ce963c 100644 --- a/samples/beta/video_object_tracking_create_model.py +++ b/samples/beta/video_object_tracking_create_model.py @@ -25,17 +25,17 @@ def create_model( client = automl.AutoMlClient() # A resource that represents Google Cloud Platform loacation. 
- project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google - metadata = automl.types.VideoObjectTrackingModelMetadata() - model = automl.types.Model( + metadata = automl.VideoObjectTrackingModelMetadata() + model = automl.Model( display_name=display_name, dataset_id=dataset_id, video_object_tracking_model_metadata=metadata, ) # Create a model with the model metadata in the region. - response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") diff --git a/samples/beta/video_object_tracking_create_model_test.py b/samples/beta/video_object_tracking_create_model_test.py index a06d65bf..5844f18f 100644 --- a/samples/beta/video_object_tracking_create_model_test.py +++ b/samples/beta/video_object_tracking_create_model_test.py @@ -31,7 +31,7 @@ def teardown(): # Cancel the training operation client = automl.AutoMlClient() - client.transport._operations_client.cancel_operation(OPERATION_ID) + client._transport.operations_client.cancel_operation(OPERATION_ID) def test_video_classification_create_model(capsys): diff --git a/samples/snippets/automl_translation_dataset.py b/samples/snippets/automl_translation_dataset.py index cf3e50ae..e1dd739a 100755 --- a/samples/snippets/automl_translation_dataset.py +++ b/samples/snippets/automl_translation_dataset.py @@ -40,7 +40,7 @@ def create_dataset(project_id, compute_region, dataset_name, source, target): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) + project_location = f"projects/{project_id}/locations/{compute_region}" # Specify the source and target language. dataset_metadata = { @@ -54,7 +54,7 @@ def create_dataset(project_id, compute_region, dataset_name, source, target): } # Create a dataset with the dataset metadata in the region. - dataset = client.create_dataset(project_location, my_dataset) + dataset = client.create_dataset(parent=project_location, dataset=my_dataset) # Display the dataset information print("Dataset name: {}".format(dataset.name)) @@ -71,9 +71,7 @@ def create_dataset(project_id, compute_region, dataset_name, source, target): dataset.translation_dataset_metadata.target_language_code ) ) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_translate_create_dataset] @@ -91,10 +89,11 @@ def list_datasets(project_id, compute_region, filter_): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) + project_location = f"projects/{project_id}/locations/{compute_region}" # List all the datasets available in the region by applying filter. 
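The migrated list calls below return pager objects that fetch further pages transparently, which is why the `for` loops are otherwise unchanged; page-level access remains available when needed. A sketch, assuming `request` is the `ListDatasetsRequest` built in the hunk that follows:

```py
pager = client.list_datasets(request=request)

# Item-level iteration pulls in additional pages on demand.
for dataset in pager:
    print(dataset.name)

# Alternatively, iterate page by page (use a fresh pager; a pager is
# consumed by iteration):
# for page in client.list_datasets(request=request).pages:
#     print(len(page.datasets))
```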
- response = client.list_datasets(project_location, filter_) + request = automl.ListDatasetsRequest(parent=project_location, filter=filter_) + response = client.list_datasets(request=request) print("List of datasets:") for dataset in response: @@ -113,9 +112,7 @@ def list_datasets(project_id, compute_region, filter_): dataset.translation_dataset_metadata.target_language_code ) ) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_translate_list_datasets] @@ -138,7 +135,7 @@ def get_dataset(project_id, compute_region, dataset_id): ) # Get complete detail of the dataset. - dataset = client.get_dataset(dataset_full_id) + dataset = client.get_dataset(name=dataset_full_id) # Display the dataset information print("Dataset name: {}".format(dataset.name)) @@ -155,9 +152,7 @@ def get_dataset(project_id, compute_region, dataset_id): dataset.translation_dataset_metadata.target_language_code ) ) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_translate_get_dataset] @@ -185,7 +180,7 @@ def import_data(project_id, compute_region, dataset_id, path): input_config = {"gcs_source": {"input_uris": input_uris}} # Import data from the input URI - response = client.import_data(dataset_full_id, input_config) + response = client.import_data(name=dataset_full_id, input_config=input_config) print("Processing import...") # synchronous check of operation status @@ -212,7 +207,7 @@ def delete_dataset(project_id, compute_region, dataset_id): ) # Delete a dataset. - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) # synchronous check of operation status print("Dataset deleted. {}".format(response.result())) diff --git a/samples/snippets/automl_translation_model.py b/samples/snippets/automl_translation_model.py index 77a4ed73..4f9d1655 100755 --- a/samples/snippets/automl_translation_model.py +++ b/samples/snippets/automl_translation_model.py @@ -49,7 +49,7 @@ def create_model(project_id, compute_region, dataset_id, model_name): } # Create a model with the model metadata in the region. - response = client.create_model(project_location, my_model) + response = client.create_model(parent=project_location, model=my_model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") @@ -66,20 +66,20 @@ def list_models(project_id, compute_region, filter_): # filter_ = 'DATASET_ID_HERE' from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) + project_location = f"projects/{project_id}/locations/{compute_region}" # List all the models available in the region by applying filter. - response = client.list_models(project_location, filter_) + request = automl.ListModelsRequest(parent=project_location, filter=filter_) + response = client.list_models(request=request) print("List of models:") for model in response: # Display the model information. 
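As with the other list methods in this release, `list_datasets` accepts either a request object or flattened keyword arguments, never both in the same call. A sketch with a placeholder project ID:

```py
from google.cloud import automl_v1beta1 as automl

project_id = "YOUR_PROJECT_ID"  # placeholder

client = automl.AutoMlClient()
project_location = f"projects/{project_id}/locations/us-central1"

# Equivalent call styles; a request object and flattened keyword
# arguments must not be mixed within a single call.
response = client.list_datasets(
    request=automl.ListDatasetsRequest(parent=project_location, filter="")
)
response = client.list_datasets(parent=project_location, filter="")

for dataset in response:
    print(dataset.display_name)
```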
- if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state == automl.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" @@ -87,9 +87,7 @@ def list_models(project_id, compute_region, filter_): print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_translate_list_models] @@ -104,7 +102,6 @@ def get_model(project_id, compute_region, model_id): # model_id = 'MODEL_ID_HERE' from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums client = automl.AutoMlClient() @@ -112,10 +109,10 @@ def get_model(project_id, compute_region, model_id): model_full_id = client.model_path(project_id, compute_region, model_id) # Get complete detail of the model. - model = client.get_model(model_full_id) + model = client.get_model(name=model_full_id) # Retrieve deployment state. - if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state == automl.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" @@ -124,9 +121,7 @@ def get_model(project_id, compute_region, model_id): print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_translate_get_model] @@ -149,7 +144,11 @@ def list_model_evaluations(project_id, compute_region, model_id, filter_): model_full_id = client.model_path(project_id, compute_region, model_id) print("List of model evaluations:") - for element in client.list_model_evaluations(model_full_id, filter_): + request = automl.ListModelEvaluationsRequest( + parent=model_full_id, + filter=filter_ + ) + for element in client.list_model_evaluations(request=request): print(element) # [END automl_translate_list_model_evaluations] @@ -171,12 +170,11 @@ def get_model_evaluation( client = automl.AutoMlClient() # Get the full path of the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, compute_region, model_id, model_evaluation_id - ) + model_path = client.model_path(project_id, compute_region, model_id) + model_evaluation_full_id = f"{model_path}/modelEvaluations/{model_evaluation_id}" # Get complete detail of the model evaluation. - response = client.get_model_evaluation(model_evaluation_full_id) + response = client.get_model_evaluation(name=model_evaluation_full_id) print(response) @@ -199,7 +197,7 @@ def delete_model(project_id, compute_region, model_id): model_full_id = client.model_path(project_id, compute_region, model_id) # Delete a model. - response = client.delete_model(model_full_id) + response = client.delete_model(name=model_full_id) # synchronous check of operation status. print("Model deleted. 
{}".format(response.result())) @@ -219,7 +217,7 @@ def get_operation_status(operation_full_id): client = automl.AutoMlClient() # Get the latest state of a long-running operation. - response = client.transport._operations_client.get_operation( + response = client._transport.operations_client.get_operation( operation_full_id ) diff --git a/samples/snippets/automl_translation_predict.py b/samples/snippets/automl_translation_predict.py index b15e0e30..70c14e36 100644 --- a/samples/snippets/automl_translation_predict.py +++ b/samples/snippets/automl_translation_predict.py @@ -56,7 +56,13 @@ def predict(project_id, compute_region, model_id, file_path): # params is additional domain-specific parameters. params = {} - response = prediction_client.predict(model_full_id, payload, params) + request = automl.PredictRequest( + name=model_full_id, + payload=payload, + params=params + ) + + response = prediction_client.predict(request=request) translated_content = response.payload[0].translation.translated_content print(u"Translated content: {}".format(translated_content.content)) diff --git a/samples/snippets/batch_predict.py b/samples/snippets/batch_predict.py index efe484f4..427fadf2 100644 --- a/samples/snippets/batch_predict.py +++ b/samples/snippets/batch_predict.py @@ -27,26 +27,24 @@ def batch_predict(project_id, model_id, input_uri, output_uri): prediction_client = automl.PredictionServiceClient() # Get the full path of the model. - model_full_id = prediction_client.model_path( - project_id, "us-central1", model_id - ) + model_full_id = f"projects/{project_id}/locations/us-central1/models/{model_id}" - gcs_source = automl.types.GcsSource(input_uris=[input_uri]) + gcs_source = automl.GcsSource(input_uris=[input_uri]) - input_config = automl.types.BatchPredictInputConfig(gcs_source=gcs_source) - gcs_destination = automl.types.GcsDestination(output_uri_prefix=output_uri) - output_config = automl.types.BatchPredictOutputConfig( + input_config = automl.BatchPredictInputConfig(gcs_source=gcs_source) + gcs_destination = automl.GcsDestination(output_uri_prefix=output_uri) + output_config = automl.BatchPredictOutputConfig( gcs_destination=gcs_destination ) response = prediction_client.batch_predict( - model_full_id, input_config, output_config + name=model_full_id, + input_config=input_config, + output_config=output_config ) print("Waiting for operation to complete...") print( - "Batch Prediction results saved to Cloud Storage bucket. {}".format( - response.result() - ) + f"Batch Prediction results saved to Cloud Storage bucket. {response.result()}" ) # [END automl_batch_predict] diff --git a/samples/snippets/delete_dataset.py b/samples/snippets/delete_dataset.py index e6136c13..23846b43 100644 --- a/samples/snippets/delete_dataset.py +++ b/samples/snippets/delete_dataset.py @@ -27,7 +27,7 @@ def delete_dataset(project_id, dataset_id): dataset_full_id = client.dataset_path( project_id, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) print("Dataset deleted. 
{}".format(response.result())) # [END automl_delete_dataset] diff --git a/samples/snippets/delete_dataset_test.py b/samples/snippets/delete_dataset_test.py index 6d204dde..d085c545 100644 --- a/samples/snippets/delete_dataset_test.py +++ b/samples/snippets/delete_dataset_test.py @@ -27,13 +27,13 @@ @pytest.fixture(scope="function") def dataset_id(): client = automl.AutoMlClient() - project_location = client.location_path(PROJECT_ID, "us-central1") + project_location = f"projects/{PROJECT_ID}/locations/us-central1" display_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") - metadata = automl.types.TextExtractionDatasetMetadata() - dataset = automl.types.Dataset( + metadata = automl.TextExtractionDatasetMetadata() + dataset = automl.Dataset( display_name=display_name, text_extraction_dataset_metadata=metadata ) - response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) dataset_id = response.result().name.split("/")[-1] yield dataset_id diff --git a/samples/snippets/delete_model.py b/samples/snippets/delete_model.py index cc6e7546..1675bd2c 100644 --- a/samples/snippets/delete_model.py +++ b/samples/snippets/delete_model.py @@ -25,7 +25,7 @@ def delete_model(project_id, model_id): client = automl.AutoMlClient() # Get the full path of the model. model_full_id = client.model_path(project_id, "us-central1", model_id) - response = client.delete_model(model_full_id) + response = client.delete_model(name=model_full_id) print("Model deleted. {}".format(response.result())) # [END automl_delete_model] diff --git a/samples/snippets/deploy_model.py b/samples/snippets/deploy_model.py index cc55cf1e..12518fef 100644 --- a/samples/snippets/deploy_model.py +++ b/samples/snippets/deploy_model.py @@ -25,7 +25,7 @@ def deploy_model(project_id, model_id): client = automl.AutoMlClient() # Get the full path of the model. model_full_id = client.model_path(project_id, "us-central1", model_id) - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) - print("Model deployment finished. {}".format(response.result())) + print(f"Model deployment finished. {response.result()}") # [END automl_deploy_model] diff --git a/samples/snippets/export_dataset.py b/samples/snippets/export_dataset.py index 45f7ee6b..6be80907 100644 --- a/samples/snippets/export_dataset.py +++ b/samples/snippets/export_dataset.py @@ -30,9 +30,9 @@ def export_dataset(project_id, dataset_id, gcs_uri): project_id, "us-central1", dataset_id ) - gcs_destination = automl.types.GcsDestination(output_uri_prefix=gcs_uri) - output_config = automl.types.OutputConfig(gcs_destination=gcs_destination) + gcs_destination = automl.GcsDestination(output_uri_prefix=gcs_uri) + output_config = automl.OutputConfig(gcs_destination=gcs_destination) - response = client.export_data(dataset_full_id, output_config) - print("Dataset exported. {}".format(response.result())) + response = client.export_data(name=dataset_full_id, output_config=output_config) + print(f"Dataset exported. 
{response.result()}") # [END automl_export_dataset] diff --git a/samples/snippets/get_dataset.py b/samples/snippets/get_dataset.py index a1831903..b0ce2c8a 100644 --- a/samples/snippets/get_dataset.py +++ b/samples/snippets/get_dataset.py @@ -32,15 +32,13 @@ def get_dataset(project_id, dataset_id): dataset_full_id = client.dataset_path( project_id, "us-central1", dataset_id ) - dataset = client.get_dataset(dataset_full_id) + dataset = client.get_dataset(name=dataset_full_id) # Display the dataset information print("Dataset name: {}".format(dataset.name)) print("Dataset id: {}".format(dataset.name.split("/")[-1])) print("Dataset display name: {}".format(dataset.display_name)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_language_sentiment_analysis_get_dataset] # [END automl_language_text_classification_get_dataset] # [END automl_translate_get_dataset] diff --git a/samples/snippets/get_model.py b/samples/snippets/get_model.py index b1ea5154..2c686d7e 100644 --- a/samples/snippets/get_model.py +++ b/samples/snippets/get_model.py @@ -25,10 +25,10 @@ def get_model(project_id, model_id): client = automl.AutoMlClient() # Get the full path of the model. model_full_id = client.model_path(project_id, "us-central1", model_id) - model = client.get_model(model_full_id) + model = client.get_model(name=model_full_id) # Retrieve deployment state. - if model.deployment_state == automl.enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state == automl.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" @@ -37,8 +37,6 @@ def get_model(project_id, model_id): print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_get_model] diff --git a/samples/snippets/get_model_evaluation.py b/samples/snippets/get_model_evaluation.py index 4a1a97a3..90541021 100644 --- a/samples/snippets/get_model_evaluation.py +++ b/samples/snippets/get_model_evaluation.py @@ -30,18 +30,15 @@ def get_model_evaluation(project_id, model_id, model_evaluation_id): client = automl.AutoMlClient() # Get the full path of the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, "us-central1", model_id, model_evaluation_id - ) + model_path = client.model_path(project_id, "us-central1", model_id) + model_evaluation_full_id = f"{model_path}/modelEvaluations/{model_evaluation_id}" # Get complete detail of the model evaluation. 
- response = client.get_model_evaluation(model_evaluation_full_id) + response = client.get_model_evaluation(name=model_evaluation_full_id) print("Model evaluation name: {}".format(response.name)) print("Model annotation spec id: {}".format(response.annotation_spec_id)) - print("Create Time:") - print("\tseconds: {}".format(response.create_time.seconds)) - print("\tnanos: {}".format(response.create_time.nanos / 1e9)) + print("Create Time: {}".format(response.create_time)) print( "Evaluation example count: {}".format(response.evaluated_example_count) ) diff --git a/samples/snippets/get_model_evaluation_test.py b/samples/snippets/get_model_evaluation_test.py index f3fe1b2b..8b868cb3 100644 --- a/samples/snippets/get_model_evaluation_test.py +++ b/samples/snippets/get_model_evaluation_test.py @@ -28,7 +28,7 @@ def model_evaluation_id(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) evaluation = None - for e in client.list_model_evaluations(model_full_id, ""): + for e in client.list_model_evaluations(parent=model_full_id, filter=""): evaluation = e break model_evaluation_id = evaluation.name.split( diff --git a/samples/snippets/get_operation_status.py b/samples/snippets/get_operation_status.py index 4e5c90f8..d2ac0cc2 100644 --- a/samples/snippets/get_operation_status.py +++ b/samples/snippets/get_operation_status.py @@ -24,7 +24,7 @@ def get_operation_status(operation_full_id): client = automl.AutoMlClient() # Get the latest state of a long-running operation. - response = client.transport._operations_client.get_operation( + response = client._transport.operations_client.get_operation( operation_full_id ) diff --git a/samples/snippets/get_operation_status_test.py b/samples/snippets/get_operation_status_test.py index c08095fc..bacbecc5 100644 --- a/samples/snippets/get_operation_status_test.py +++ b/samples/snippets/get_operation_status_test.py @@ -25,8 +25,9 @@ @pytest.fixture(scope="function") def operation_id(): client = automl.AutoMlClient() - project_location = client.location_path(PROJECT_ID, "us-central1") - generator = client.transport._operations_client.list_operations( + project_location = f"projects/{PROJECT_ID}/locations/us-central1" + + generator = client._transport.operations_client.list_operations( project_location, filter_="" ).pages page = next(generator) diff --git a/samples/snippets/import_dataset.py b/samples/snippets/import_dataset.py index f465bdb1..3334fb3a 100644 --- a/samples/snippets/import_dataset.py +++ b/samples/snippets/import_dataset.py @@ -30,10 +30,10 @@ def import_dataset(project_id, dataset_id, path): ) # Get the multiple Google Cloud Storage URIs input_uris = path.split(",") - gcs_source = automl.types.GcsSource(input_uris=input_uris) - input_config = automl.types.InputConfig(gcs_source=gcs_source) + gcs_source = automl.GcsSource(input_uris=input_uris) + input_config = automl.InputConfig(gcs_source=gcs_source) # Import data from the input URI - response = client.import_data(dataset_full_id, input_config) + response = client.import_data(name=dataset_full_id, input_config=input_config) print("Processing import...") print("Data imported. 
{}".format(response.result())) diff --git a/samples/snippets/language_entity_extraction_create_dataset.py b/samples/snippets/language_entity_extraction_create_dataset.py index 056ff22c..34d7ae5b 100644 --- a/samples/snippets/language_entity_extraction_create_dataset.py +++ b/samples/snippets/language_entity_extraction_create_dataset.py @@ -25,14 +25,14 @@ def create_dataset(project_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") - metadata = automl.types.TextExtractionDatasetMetadata() - dataset = automl.types.Dataset( + project_location = f"projects/{project_id}/locations/us-central1" + metadata = automl.TextExtractionDatasetMetadata() + dataset = automl.Dataset( display_name=display_name, text_extraction_dataset_metadata=metadata ) # Create a dataset with the dataset metadata in the region. - response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) created_dataset = response.result() diff --git a/samples/snippets/language_entity_extraction_create_dataset_test.py b/samples/snippets/language_entity_extraction_create_dataset_test.py index 044a0d50..13cd9911 100644 --- a/samples/snippets/language_entity_extraction_create_dataset_test.py +++ b/samples/snippets/language_entity_extraction_create_dataset_test.py @@ -38,5 +38,5 @@ def test_entity_extraction_create_dataset(capsys): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/snippets/language_entity_extraction_create_model.py b/samples/snippets/language_entity_extraction_create_model.py index 5e0748dd..178ebf9a 100644 --- a/samples/snippets/language_entity_extraction_create_model.py +++ b/samples/snippets/language_entity_extraction_create_model.py @@ -26,17 +26,17 @@ def create_model(project_id, dataset_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google - metadata = automl.types.TextExtractionModelMetadata() - model = automl.types.Model( + metadata = automl.TextExtractionModelMetadata() + model = automl.Model( display_name=display_name, dataset_id=dataset_id, text_extraction_model_metadata=metadata, ) # Create a model with the model metadata in the region. - response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") diff --git a/samples/snippets/language_entity_extraction_predict.py b/samples/snippets/language_entity_extraction_predict.py index 40d7e89b..8caea632 100644 --- a/samples/snippets/language_entity_extraction_predict.py +++ b/samples/snippets/language_entity_extraction_predict.py @@ -26,18 +26,18 @@ def predict(project_id, model_id, content): prediction_client = automl.PredictionServiceClient() # Get the full path of the model. 
- model_full_id = prediction_client.model_path( + model_full_id = automl.AutoMlClient.model_path( project_id, "us-central1", model_id ) # Supported mime_types: 'text/plain', 'text/html' # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet - text_snippet = automl.types.TextSnippet( + text_snippet = automl.TextSnippet( content=content, mime_type="text/plain" ) - payload = automl.types.ExamplePayload(text_snippet=text_snippet) + payload = automl.ExamplePayload(text_snippet=text_snippet) - response = prediction_client.predict(model_full_id, payload) + response = prediction_client.predict(name=model_full_id, payload=payload) for annotation_payload in response.payload: print( diff --git a/samples/snippets/language_entity_extraction_predict_test.py b/samples/snippets/language_entity_extraction_predict_test.py index 35dfddef..bba6e43d 100644 --- a/samples/snippets/language_entity_extraction_predict_test.py +++ b/samples/snippets/language_entity_extraction_predict_test.py @@ -28,10 +28,10 @@ def verify_model_state(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - model = client.get_model(model_full_id) - if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED: + model = client.get_model(name=model_full_id) + if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED: # Deploy model if it is not deployed - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) response.result() diff --git a/samples/snippets/language_sentiment_analysis_create_dataset.py b/samples/snippets/language_sentiment_analysis_create_dataset.py index 2caae065..519d9a82 100644 --- a/samples/snippets/language_sentiment_analysis_create_dataset.py +++ b/samples/snippets/language_sentiment_analysis_create_dataset.py @@ -25,22 +25,22 @@ def create_dataset(project_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Each dataset requires a sentiment score with a defined sentiment_max # value, for more information on TextSentimentDatasetMetadata, see: # https://cloud.google.com/natural-language/automl/docs/prepare#sentiment-analysis # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsentimentdatasetmetadata - metadata = automl.types.TextSentimentDatasetMetadata( + metadata = automl.TextSentimentDatasetMetadata( sentiment_max=4 ) # Possible max sentiment score: 1-10 - dataset = automl.types.Dataset( + dataset = automl.Dataset( display_name=display_name, text_sentiment_dataset_metadata=metadata ) # Create a dataset with the dataset metadata in the region. 
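`PredictionServiceClient` no longer exposes `model_path`, so the updated samples borrow the classmethod from `AutoMlClient` without instantiating a second client. A sketch with placeholder IDs and content:

```py
from google.cloud import automl

# Placeholders, for illustration only.
project_id, model_id = "YOUR_PROJECT_ID", "YOUR_MODEL_ID"

prediction_client = automl.PredictionServiceClient()

# model_path is callable as a classmethod on AutoMlClient.
model_full_id = automl.AutoMlClient.model_path(
    project_id, "us-central1", model_id
)

payload = automl.ExamplePayload(
    text_snippet=automl.TextSnippet(content="Hello", mime_type="text/plain")
)
response = prediction_client.predict(name=model_full_id, payload=payload)
```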
- response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) created_dataset = response.result() diff --git a/samples/snippets/language_sentiment_analysis_create_dataset_test.py b/samples/snippets/language_sentiment_analysis_create_dataset_test.py index 239a154f..1ac54461 100644 --- a/samples/snippets/language_sentiment_analysis_create_dataset_test.py +++ b/samples/snippets/language_sentiment_analysis_create_dataset_test.py @@ -37,5 +37,5 @@ def test_sentiment_analysis_create_dataset(capsys): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/snippets/language_sentiment_analysis_create_model.py b/samples/snippets/language_sentiment_analysis_create_model.py index 6eca50a7..40262aa4 100644 --- a/samples/snippets/language_sentiment_analysis_create_model.py +++ b/samples/snippets/language_sentiment_analysis_create_model.py @@ -26,18 +26,19 @@ def create_model(project_id, dataset_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google - metadata = automl.types.TextSentimentModelMetadata() - model = automl.types.Model( + metadata = automl.TextSentimentModelMetadata() + model = automl.Model( display_name=display_name, dataset_id=dataset_id, text_sentiment_model_metadata=metadata, ) # Create a model with the model metadata in the region. - response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") # [END automl_language_sentiment_analysis_create_model] + return response diff --git a/samples/snippets/language_sentiment_analysis_create_model_test.py b/samples/snippets/language_sentiment_analysis_create_model_test.py index bf9d1978..406f9e1c 100644 --- a/samples/snippets/language_sentiment_analysis_create_model_test.py +++ b/samples/snippets/language_sentiment_analysis_create_model_test.py @@ -14,7 +14,6 @@ import os -from google.cloud import automl import pytest import language_sentiment_analysis_create_model @@ -25,13 +24,11 @@ @pytest.mark.slow def test_sentiment_analysis_create_model(capsys): - language_sentiment_analysis_create_model.create_model( + operation = language_sentiment_analysis_create_model.create_model( PROJECT_ID, DATASET_ID, "sentiment_test_create_model" ) out, _ = capsys.readouterr() assert "Training started" in out # Cancel the operation - operation_id = out.split("Training operation name: ")[1].split("\n")[0] - client = automl.AutoMlClient() - client.transport._operations_client.cancel_operation(operation_id) + operation.cancel() diff --git a/samples/snippets/language_sentiment_analysis_predict.py b/samples/snippets/language_sentiment_analysis_predict.py index cf459142..e2f5c777 100644 --- a/samples/snippets/language_sentiment_analysis_predict.py +++ b/samples/snippets/language_sentiment_analysis_predict.py @@ -26,18 +26,18 @@ def predict(project_id, model_id, content): prediction_client = automl.PredictionServiceClient() # Get the full path of the model. 
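Because `create_model` now returns a `google.api_core.operation.Operation`, the updated tests cancel training through the returned object instead of scraping the operation name out of captured stdout. A sketch with placeholder IDs:

```py
from google.cloud import automl

# Placeholders, for illustration only.
project_id, dataset_id = "YOUR_PROJECT_ID", "YOUR_DATASET_ID"

client = automl.AutoMlClient()
project_location = f"projects/{project_id}/locations/us-central1"

model = automl.Model(
    display_name="sentiment_test_create_model",
    dataset_id=dataset_id,
    text_sentiment_model_metadata=automl.TextSentimentModelMetadata(),
)

# The returned Operation can be cancelled directly.
operation = client.create_model(parent=project_location, model=model)
operation.cancel()
```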
- model_full_id = prediction_client.model_path( + model_full_id = automl.AutoMlClient.model_path( project_id, "us-central1", model_id ) # Supported mime_types: 'text/plain', 'text/html' # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet - text_snippet = automl.types.TextSnippet( + text_snippet = automl.TextSnippet( content=content, mime_type="text/plain" ) - payload = automl.types.ExamplePayload(text_snippet=text_snippet) + payload = automl.ExamplePayload(text_snippet=text_snippet) - response = prediction_client.predict(model_full_id, payload) + response = prediction_client.predict(name=model_full_id, payload=payload) for annotation_payload in response.payload: print( diff --git a/samples/snippets/language_sentiment_analysis_predict_test.py b/samples/snippets/language_sentiment_analysis_predict_test.py index bfd35649..ee32ebc5 100644 --- a/samples/snippets/language_sentiment_analysis_predict_test.py +++ b/samples/snippets/language_sentiment_analysis_predict_test.py @@ -29,10 +29,10 @@ def setup(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - model = client.get_model(model_full_id) - if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED: + model = client.get_model(name=model_full_id) + if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED: # Deploy model if it is not deployed - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) response.result() diff --git a/samples/snippets/language_text_classification_create_dataset.py b/samples/snippets/language_text_classification_create_dataset.py index f4a2add4..943ac92d 100644 --- a/samples/snippets/language_text_classification_create_dataset.py +++ b/samples/snippets/language_text_classification_create_dataset.py @@ -25,21 +25,21 @@ def create_dataset(project_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Specify the classification type # Types: # MultiLabel: Multiple labels are allowed for one example. # MultiClass: At most one label is allowed per example. - metadata = automl.types.TextClassificationDatasetMetadata( - classification_type=automl.enums.ClassificationType.MULTICLASS + metadata = automl.TextClassificationDatasetMetadata( + classification_type=automl.ClassificationType.MULTICLASS ) - dataset = automl.types.Dataset( + dataset = automl.Dataset( display_name=display_name, text_classification_dataset_metadata=metadata, ) # Create a dataset with the dataset metadata in the region. 
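Enum values previously referenced through the removed `automl.enums` module are now addressed from the package root or from the message class they belong to, as these hunks show. For example:

```py
from google.cloud import automl

# Enums moved off `automl.enums` onto the package root or their owning class.
metadata = automl.TextClassificationDatasetMetadata(
    classification_type=automl.ClassificationType.MULTICLASS
)
state = automl.Model.DeploymentState.DEPLOYED
```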
- response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) created_dataset = response.result() diff --git a/samples/snippets/language_text_classification_create_dataset_test.py b/samples/snippets/language_text_classification_create_dataset_test.py index 771945ee..a00e6eb1 100644 --- a/samples/snippets/language_text_classification_create_dataset_test.py +++ b/samples/snippets/language_text_classification_create_dataset_test.py @@ -37,5 +37,5 @@ def test_text_classification_create_dataset(capsys): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/snippets/language_text_classification_create_model.py b/samples/snippets/language_text_classification_create_model.py index b72ec8b4..ed996def 100644 --- a/samples/snippets/language_text_classification_create_model.py +++ b/samples/snippets/language_text_classification_create_model.py @@ -26,17 +26,17 @@ def create_model(project_id, dataset_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google - metadata = automl.types.TextClassificationModelMetadata() - model = automl.types.Model( + metadata = automl.TextClassificationModelMetadata() + model = automl.Model( display_name=display_name, dataset_id=dataset_id, text_classification_model_metadata=metadata, ) # Create a model with the model metadata in the region. - response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print(u"Training operation name: {}".format(response.operation.name)) print("Training started...") diff --git a/samples/snippets/language_text_classification_create_model_test.py b/samples/snippets/language_text_classification_create_model_test.py index 9b5c6f01..299e328a 100644 --- a/samples/snippets/language_text_classification_create_model_test.py +++ b/samples/snippets/language_text_classification_create_model_test.py @@ -34,4 +34,4 @@ def test_text_classification_create_model(capsys): # Cancel the operation operation_id = out.split("Training operation name: ")[1].split("\n")[0] client = automl.AutoMlClient() - client.transport._operations_client.cancel_operation(operation_id) + client._transport.operations_client.cancel_operation(operation_id) diff --git a/samples/snippets/language_text_classification_predict.py b/samples/snippets/language_text_classification_predict.py index 6edac71e..4baa8478 100644 --- a/samples/snippets/language_text_classification_predict.py +++ b/samples/snippets/language_text_classification_predict.py @@ -26,18 +26,18 @@ def predict(project_id, model_id, content): prediction_client = automl.PredictionServiceClient() # Get the full path of the model. 
- model_full_id = prediction_client.model_path( + model_full_id = automl.AutoMlClient.model_path( project_id, "us-central1", model_id ) # Supported mime_types: 'text/plain', 'text/html' # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet - text_snippet = automl.types.TextSnippet( + text_snippet = automl.TextSnippet( content=content, mime_type="text/plain" ) - payload = automl.types.ExamplePayload(text_snippet=text_snippet) + payload = automl.ExamplePayload(text_snippet=text_snippet) - response = prediction_client.predict(model_full_id, payload) + response = prediction_client.predict(name=model_full_id, payload=payload) for annotation_payload in response.payload: print( diff --git a/samples/snippets/language_text_classification_predict_test.py b/samples/snippets/language_text_classification_predict_test.py index 36202f5b..1150d9d1 100644 --- a/samples/snippets/language_text_classification_predict_test.py +++ b/samples/snippets/language_text_classification_predict_test.py @@ -28,10 +28,10 @@ def verify_model_state(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - model = client.get_model(model_full_id) - if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED: + model = client.get_model(name=model_full_id) + if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED: # Deploy model if it is not deployed - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) response.result() diff --git a/samples/snippets/list_datasets.py b/samples/snippets/list_datasets.py index ae8c576e..ce92355c 100644 --- a/samples/snippets/list_datasets.py +++ b/samples/snippets/list_datasets.py @@ -28,19 +28,18 @@ def list_datasets(project_id): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # List all the datasets available in the region. 
- response = client.list_datasets(project_location, "") + request = automl.ListDatasetsRequest(parent=project_location, filter="") + response = client.list_datasets(request=request) print("List of datasets:") for dataset in response: print("Dataset name: {}".format(dataset.name)) print("Dataset id: {}".format(dataset.name.split("/")[-1])) print("Dataset display name: {}".format(dataset.display_name)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_language_sentiment_analysis_list_datasets] # [END automl_language_text_classification_list_datasets] # [END automl_translate_list_datasets] diff --git a/samples/snippets/list_model_evaluations.py b/samples/snippets/list_model_evaluations.py index 3dcb7932..c5e29d70 100644 --- a/samples/snippets/list_model_evaluations.py +++ b/samples/snippets/list_model_evaluations.py @@ -32,16 +32,14 @@ def list_model_evaluations(project_id, model_id): model_full_id = client.model_path(project_id, "us-central1", model_id) print("List of model evaluations:") - for evaluation in client.list_model_evaluations(model_full_id, ""): + for evaluation in client.list_model_evaluations(parent=model_full_id, filter=""): print("Model evaluation name: {}".format(evaluation.name)) print( "Model annotation spec id: {}".format( evaluation.annotation_spec_id ) ) - print("Create Time:") - print("\tseconds: {}".format(evaluation.create_time.seconds)) - print("\tnanos: {}".format(evaluation.create_time.nanos / 1e9)) + print("Create Time: {}".format(evaluation.create_time)) print( "Evaluation example count: {}".format( evaluation.evaluated_example_count diff --git a/samples/snippets/list_models.py b/samples/snippets/list_models.py index 5c5dff67..d46ef104 100644 --- a/samples/snippets/list_models.py +++ b/samples/snippets/list_models.py @@ -23,15 +23,17 @@ def list_models(project_id): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") - response = client.list_models(project_location, "") + project_location = f"projects/{project_id}/locations/us-central1" + + request = automl.ListModelsRequest(parent=project_location, filter="") + response = client.list_models(request=request) print("List of models:") for model in response: # Display the model information. if ( model.deployment_state - == automl.enums.Model.DeploymentState.DEPLOYED + == automl.Model.DeploymentState.DEPLOYED ): deployment_state = "deployed" else: @@ -40,8 +42,6 @@ def list_models(project_id): print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_list_models] diff --git a/samples/snippets/list_operation_status.py b/samples/snippets/list_operation_status.py index 45534fda..679ff66e 100644 --- a/samples/snippets/list_operation_status.py +++ b/samples/snippets/list_operation_status.py @@ -23,10 +23,10 @@ def list_operation_status(project_id): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. 
- project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # List all the operations names available in the region. - response = client.transport._operations_client.list_operations( - project_location, "" + response = client._transport.operations_client.list_operations( + name=project_location, filter_="", timeout=5 ) print("List of operations:") diff --git a/samples/snippets/model_test.py b/samples/snippets/model_test.py index fd2fabc3..da5f806f 100644 --- a/samples/snippets/model_test.py +++ b/samples/snippets/model_test.py @@ -31,13 +31,13 @@ def test_model_create_status_delete(capsys): # create model client = automl.AutoMlClient() model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") - project_location = client.location_path(project_id, compute_region) + project_location = f"projects/{project_id}/locations/{compute_region}" my_model = { "display_name": model_name, "dataset_id": "3876092572857648864", "translation_model_metadata": {"base_model": ""}, } - response = client.create_model(project_location, my_model) + response = client.create_model(parent=project_location, model=my_model) operation_name = response.operation.name assert operation_name diff --git a/samples/snippets/translate_create_dataset.py b/samples/snippets/translate_create_dataset.py index 8f468679..b82b9b03 100644 --- a/samples/snippets/translate_create_dataset.py +++ b/samples/snippets/translate_create_dataset.py @@ -25,19 +25,19 @@ def create_dataset(project_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # For a list of supported languages, see: # https://cloud.google.com/translate/automl/docs/languages - dataset_metadata = automl.types.TranslationDatasetMetadata( + dataset_metadata = automl.TranslationDatasetMetadata( source_language_code="en", target_language_code="ja" ) - dataset = automl.types.Dataset( + dataset = automl.Dataset( display_name=display_name, translation_dataset_metadata=dataset_metadata, ) # Create a dataset with the dataset metadata in the region. - response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) created_dataset = response.result() diff --git a/samples/snippets/translate_create_dataset_test.py b/samples/snippets/translate_create_dataset_test.py index 9011da97..2f6dd223 100644 --- a/samples/snippets/translate_create_dataset_test.py +++ b/samples/snippets/translate_create_dataset_test.py @@ -36,5 +36,5 @@ def test_translate_create_dataset(capsys): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/snippets/translate_create_model.py b/samples/snippets/translate_create_model.py index c83c1aa4..47304d4d 100644 --- a/samples/snippets/translate_create_model.py +++ b/samples/snippets/translate_create_model.py @@ -26,17 +26,17 @@ def create_model(project_id, dataset_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. 
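Note the argument-name asymmetry in the `list_operation_status` hunk: the low-level operations client keeps the trailing underscore on `filter_`, while the generated AutoML surface renamed it to `filter`. A sketch with a placeholder project ID:

```py
from google.cloud import automl

project_id = "YOUR_PROJECT_ID"  # placeholder

client = automl.AutoMlClient()
project_location = f"projects/{project_id}/locations/us-central1"

# The low-level operations client still takes `filter_`; it is reached
# through the private `_transport` attribute, as in the samples above.
response = client._transport.operations_client.list_operations(
    name=project_location, filter_="", timeout=5
)
for operation in response:
    print(operation.name)
```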
- project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google - translation_model_metadata = automl.types.TranslationModelMetadata() - model = automl.types.Model( + translation_model_metadata = automl.TranslationModelMetadata() + model = automl.Model( display_name=display_name, dataset_id=dataset_id, translation_model_metadata=translation_model_metadata, ) # Create a model with the model metadata in the region. - response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") diff --git a/samples/snippets/translate_create_model_test.py b/samples/snippets/translate_create_model_test.py index b564d7e0..118c7386 100644 --- a/samples/snippets/translate_create_model_test.py +++ b/samples/snippets/translate_create_model_test.py @@ -32,4 +32,4 @@ def test_translate_create_model(capsys): # Cancel the operation operation_id = out.split("Training operation name: ")[1].split("\n")[0] client = automl.AutoMlClient() - client.transport._operations_client.cancel_operation(operation_id) + client._transport.operations_client.cancel_operation(operation_id) diff --git a/samples/snippets/translate_predict.py b/samples/snippets/translate_predict.py index 31c965e8..45560319 100644 --- a/samples/snippets/translate_predict.py +++ b/samples/snippets/translate_predict.py @@ -26,7 +26,7 @@ def predict(project_id, model_id, file_path): prediction_client = automl.PredictionServiceClient() # Get the full path of the model. - model_full_id = prediction_client.model_path( + model_full_id = automl.AutoMlClient.model_path( project_id, "us-central1", model_id ) @@ -35,10 +35,10 @@ def predict(project_id, model_id, file_path): content = content_file.read() content.decode("utf-8") - text_snippet = automl.types.TextSnippet(content=content) - payload = automl.types.ExamplePayload(text_snippet=text_snippet) + text_snippet = automl.TextSnippet(content=content) + payload = automl.ExamplePayload(text_snippet=text_snippet) - response = prediction_client.predict(model_full_id, payload) + response = prediction_client.predict(name=model_full_id, payload=payload) translated_content = response.payload[0].translation.translated_content print(u"Translated content: {}".format(translated_content.content)) diff --git a/samples/snippets/translate_predict_test.py b/samples/snippets/translate_predict_test.py index cd31d98b..4f0c45a1 100644 --- a/samples/snippets/translate_predict_test.py +++ b/samples/snippets/translate_predict_test.py @@ -29,10 +29,10 @@ def setup(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - model = client.get_model(model_full_id) - if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED: + model = client.get_model(name=model_full_id) + if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED: # Deploy model if it is not deployed - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) response.result() diff --git a/samples/snippets/undeploy_model.py b/samples/snippets/undeploy_model.py index b737064d..25b5cdb7 100644 --- a/samples/snippets/undeploy_model.py +++ b/samples/snippets/undeploy_model.py @@ -25,7 +25,7 @@ def undeploy_model(project_id, model_id): client = automl.AutoMlClient() # Get 
the full path of the model. model_full_id = client.model_path(project_id, "us-central1", model_id) - response = client.undeploy_model(model_full_id) + response = client.undeploy_model(name=model_full_id) print("Model undeployment finished. {}".format(response.result())) # [END automl_undeploy_model] diff --git a/samples/snippets/vision_classification_create_dataset.py b/samples/snippets/vision_classification_create_dataset.py index 8981a795..3d4da1b9 100644 --- a/samples/snippets/vision_classification_create_dataset.py +++ b/samples/snippets/vision_classification_create_dataset.py @@ -25,22 +25,22 @@ def create_dataset(project_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Specify the classification type # Types: # MultiLabel: Multiple labels are allowed for one example. # MultiClass: At most one label is allowed per example. # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#classificationtype - metadata = automl.types.ImageClassificationDatasetMetadata( - classification_type=automl.enums.ClassificationType.MULTILABEL + metadata = automl.ImageClassificationDatasetMetadata( + classification_type=automl.ClassificationType.MULTILABEL ) - dataset = automl.types.Dataset( + dataset = automl.Dataset( display_name=display_name, image_classification_dataset_metadata=metadata, ) # Create a dataset with the dataset metadata in the region. - response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) created_dataset = response.result() diff --git a/samples/snippets/vision_classification_create_dataset_test.py b/samples/snippets/vision_classification_create_dataset_test.py index aaa8a575..efd32810 100644 --- a/samples/snippets/vision_classification_create_dataset_test.py +++ b/samples/snippets/vision_classification_create_dataset_test.py @@ -40,5 +40,5 @@ def test_vision_classification_create_dataset(capsys): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/snippets/vision_classification_create_model.py b/samples/snippets/vision_classification_create_model.py index 30505614..06cf7706 100644 --- a/samples/snippets/vision_classification_create_model.py +++ b/samples/snippets/vision_classification_create_model.py @@ -26,23 +26,24 @@ def create_model(project_id, dataset_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google # train_budget_milli_node_hours: The actual train_cost will be equal or # less than this value. 
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageclassificationmodelmetadata - metadata = automl.types.ImageClassificationModelMetadata( + metadata = automl.ImageClassificationModelMetadata( train_budget_milli_node_hours=24000 ) - model = automl.types.Model( + model = automl.Model( display_name=display_name, dataset_id=dataset_id, image_classification_model_metadata=metadata, ) # Create a model with the model metadata in the region. - response = client.create_model(project_location, model) + response = client.create_model(parent=project_location, model=model) print("Training operation name: {}".format(response.operation.name)) print("Training started...") # [END automl_vision_classification_create_model] + return response diff --git a/samples/snippets/vision_classification_create_model_test.py b/samples/snippets/vision_classification_create_model_test.py index aea9926a..76358307 100644 --- a/samples/snippets/vision_classification_create_model_test.py +++ b/samples/snippets/vision_classification_create_model_test.py @@ -14,7 +14,6 @@ import os -from google.cloud import automl import pytest import vision_classification_create_model @@ -25,13 +24,11 @@ @pytest.mark.slow def test_vision_classification_create_model(capsys): - vision_classification_create_model.create_model( + operation = vision_classification_create_model.create_model( PROJECT_ID, DATASET_ID, "classification_test_create_model" ) out, _ = capsys.readouterr() assert "Training started" in out # Cancel the operation - operation_id = out.split("Training operation name: ")[1].split("\n")[0] - client = automl.AutoMlClient() - client.transport._operations_client.cancel_operation(operation_id) + operation.cancel() diff --git a/samples/snippets/vision_classification_deploy_model_node_count.py b/samples/snippets/vision_classification_deploy_model_node_count.py index b89cec73..98be955c 100644 --- a/samples/snippets/vision_classification_deploy_model_node_count.py +++ b/samples/snippets/vision_classification_deploy_model_node_count.py @@ -28,12 +28,15 @@ def deploy_model(project_id, model_id): # node count determines the number of nodes to deploy the model on. # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageclassificationmodeldeploymentmetadata - metadata = automl.types.ImageClassificationModelDeploymentMetadata( + metadata = automl.ImageClassificationModelDeploymentMetadata( node_count=2 ) - response = client.deploy_model( - model_full_id, image_classification_model_deployment_metadata=metadata + + request = automl.DeployModelRequest( + name=model_full_id, + image_classification_model_deployment_metadata=metadata ) + response = client.deploy_model(request=request) print("Model deployment finished. {}".format(response.result())) # [END automl_vision_classification_deploy_model_node_count] diff --git a/samples/snippets/vision_classification_predict.py b/samples/snippets/vision_classification_predict.py index c42606cc..4b1a2f7a 100644 --- a/samples/snippets/vision_classification_predict.py +++ b/samples/snippets/vision_classification_predict.py @@ -26,7 +26,7 @@ def predict(project_id, model_id, file_path): prediction_client = automl.PredictionServiceClient() # Get the full path of the model. 
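Per-deployment metadata now travels inside a `DeployModelRequest` rather than being passed as a separate keyword argument to `deploy_model`. A sketch with placeholder IDs:

```py
from google.cloud import automl

# Placeholders, for illustration only.
project_id, model_id = "YOUR_PROJECT_ID", "YOUR_MODEL_ID"

client = automl.AutoMlClient()
model_full_id = client.model_path(project_id, "us-central1", model_id)

# Deployment metadata rides inside the request object.
request = automl.DeployModelRequest(
    name=model_full_id,
    image_classification_model_deployment_metadata=(
        automl.ImageClassificationModelDeploymentMetadata(node_count=2)
    ),
)
response = client.deploy_model(request=request)
response.result()
```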
- model_full_id = prediction_client.model_path( + model_full_id = automl.AutoMlClient.model_path( project_id, "us-central1", model_id ) @@ -34,15 +34,21 @@ def predict(project_id, model_id, file_path): with open(file_path, "rb") as content_file: content = content_file.read() - image = automl.types.Image(image_bytes=content) - payload = automl.types.ExamplePayload(image=image) + image = automl.Image(image_bytes=content) + payload = automl.ExamplePayload(image=image) # params is additional domain-specific parameters. # score_threshold is used to filter the result # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest params = {"score_threshold": "0.8"} - response = prediction_client.predict(model_full_id, payload, params) + request = automl.PredictRequest( + name=model_full_id, + payload=payload, + params=params + ) + response = prediction_client.predict(request=request) + print("Prediction results:") for result in response.payload: print("Predicted class name: {}".format(result.display_name)) diff --git a/samples/snippets/vision_classification_predict_test.py b/samples/snippets/vision_classification_predict_test.py index bc91796a..71af7433 100644 --- a/samples/snippets/vision_classification_predict_test.py +++ b/samples/snippets/vision_classification_predict_test.py @@ -29,10 +29,10 @@ def setup(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - model = client.get_model(model_full_id) - if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED: + model = client.get_model(name=model_full_id) + if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED: # Deploy model if it is not deployed - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) response.result() diff --git a/samples/snippets/vision_object_detection_create_dataset.py b/samples/snippets/vision_object_detection_create_dataset.py index 9a4b1436..29822e26 100644 --- a/samples/snippets/vision_object_detection_create_dataset.py +++ b/samples/snippets/vision_object_detection_create_dataset.py @@ -25,15 +25,15 @@ def create_dataset(project_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") - metadata = automl.types.ImageObjectDetectionDatasetMetadata() - dataset = automl.types.Dataset( + project_location = f"projects/{project_id}/locations/us-central1" + metadata = automl.ImageObjectDetectionDatasetMetadata() + dataset = automl.Dataset( display_name=display_name, image_object_detection_dataset_metadata=metadata, ) # Create a dataset with the dataset metadata in the region. 
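Prediction parameters likewise move into a `PredictRequest`. A sketch with placeholder IDs and a hypothetical image path:

```py
from google.cloud import automl

# Placeholders, for illustration only.
project_id, model_id = "YOUR_PROJECT_ID", "YOUR_MODEL_ID"
file_path = "path/to/image.jpg"  # hypothetical

prediction_client = automl.PredictionServiceClient()
model_full_id = automl.AutoMlClient.model_path(
    project_id, "us-central1", model_id
)

with open(file_path, "rb") as content_file:
    payload = automl.ExamplePayload(
        image=automl.Image(image_bytes=content_file.read())
    )

# Domain-specific options ride in `params`; score_threshold filters out
# low-confidence results.
request = automl.PredictRequest(
    name=model_full_id,
    payload=payload,
    params={"score_threshold": "0.8"},
)
response = prediction_client.predict(request=request)
```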
- response = client.create_dataset(project_location, dataset) + response = client.create_dataset(parent=project_location, dataset=dataset) created_dataset = response.result() diff --git a/samples/snippets/vision_object_detection_create_dataset_test.py b/samples/snippets/vision_object_detection_create_dataset_test.py index 54fcf84d..e7a82e3a 100644 --- a/samples/snippets/vision_object_detection_create_dataset_test.py +++ b/samples/snippets/vision_object_detection_create_dataset_test.py @@ -40,5 +40,5 @@ def test_vision_object_detection_create_dataset(capsys): dataset_full_id = client.dataset_path( PROJECT_ID, "us-central1", dataset_id ) - response = client.delete_dataset(dataset_full_id) + response = client.delete_dataset(name=dataset_full_id) response.result() diff --git a/samples/snippets/vision_object_detection_create_model.py b/samples/snippets/vision_object_detection_create_model.py index 980be41c..d00c0a66 100644 --- a/samples/snippets/vision_object_detection_create_model.py +++ b/samples/snippets/vision_object_detection_create_model.py @@ -26,23 +26,24 @@ def create_model(project_id, dataset_id, display_name): client = automl.AutoMlClient() # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, "us-central1") + project_location = f"projects/{project_id}/locations/us-central1" # Leave model unset to use the default base model provided by Google # train_budget_milli_node_hours: The actual train_cost will be equal or # less than this value. # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodelmetadata - metadata = automl.types.ImageObjectDetectionModelMetadata( + metadata = automl.ImageObjectDetectionModelMetadata( train_budget_milli_node_hours=24000 ) - model = automl.types.Model( + model = automl.Model( display_name=display_name, dataset_id=dataset_id, image_object_detection_model_metadata=metadata, ) # Create a model with the model metadata in the region. 
-    response = client.create_model(project_location, model)
+    response = client.create_model(parent=project_location, model=model)

     print("Training operation name: {}".format(response.operation.name))
     print("Training started...")
     # [END automl_vision_object_detection_create_model]
+    return response
diff --git a/samples/snippets/vision_object_detection_create_model_test.py b/samples/snippets/vision_object_detection_create_model_test.py
index af93a69b..d5379056 100644
--- a/samples/snippets/vision_object_detection_create_model_test.py
+++ b/samples/snippets/vision_object_detection_create_model_test.py
@@ -14,7 +14,6 @@

 import os

-from google.cloud import automl
 import pytest

 import vision_object_detection_create_model
@@ -25,13 +24,11 @@

 @pytest.mark.slow
 def test_vision_object_detection_create_model(capsys):
-    vision_object_detection_create_model.create_model(
+    operation = vision_object_detection_create_model.create_model(
         PROJECT_ID, DATASET_ID, "object_test_create_model"
     )
     out, _ = capsys.readouterr()
     assert "Training started" in out

     # Cancel the operation
-    operation_id = out.split("Training operation name: ")[1].split("\n")[0]
-    client = automl.AutoMlClient()
-    client.transport._operations_client.cancel_operation(operation_id)
+    operation.cancel()
diff --git a/samples/snippets/vision_object_detection_deploy_model_node_count.py b/samples/snippets/vision_object_detection_deploy_model_node_count.py
index 2daa4101..9a15d228 100644
--- a/samples/snippets/vision_object_detection_deploy_model_node_count.py
+++ b/samples/snippets/vision_object_detection_deploy_model_node_count.py
@@ -28,13 +28,15 @@ def deploy_model(project_id, model_id):

     # node count determines the number of nodes to deploy the model on.
     # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodeldeploymentmetadata
-    metadata = automl.types.ImageObjectDetectionModelDeploymentMetadata(
+    metadata = automl.ImageObjectDetectionModelDeploymentMetadata(
         node_count=2
     )
-    response = client.deploy_model(
-        model_full_id,
+
+    request = automl.DeployModelRequest(
+        name=model_full_id,
         image_object_detection_model_deployment_metadata=metadata,
     )
+    response = client.deploy_model(request=request)

     print("Model deployment finished. {}".format(response.result()))
     # [END automl_vision_object_detection_deploy_model_node_count]
diff --git a/samples/snippets/vision_object_detection_predict.py b/samples/snippets/vision_object_detection_predict.py
index efd1534e..2a059d40 100644
--- a/samples/snippets/vision_object_detection_predict.py
+++ b/samples/snippets/vision_object_detection_predict.py
@@ -26,7 +26,7 @@ def predict(project_id, model_id, file_path):
     prediction_client = automl.PredictionServiceClient()

     # Get the full path of the model.
-    model_full_id = prediction_client.model_path(
+    model_full_id = automl.AutoMlClient.model_path(
         project_id, "us-central1", model_id
     )

@@ -34,15 +34,21 @@ def predict(project_id, model_id, file_path):
     with open(file_path, "rb") as content_file:
         content = content_file.read()

-    image = automl.types.Image(image_bytes=content)
-    payload = automl.types.ExamplePayload(image=image)
+    image = automl.Image(image_bytes=content)
+    payload = automl.ExamplePayload(image=image)

    # 'params' is a dict of additional domain-specific parameters.
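    # (Param values are passed as strings, e.g. "0.8" below.)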
# score_threshold is used to filter the result # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest params = {"score_threshold": "0.8"} - response = prediction_client.predict(model_full_id, payload, params) + request = automl.PredictRequest( + name=model_full_id, + payload=payload, + params=params + ) + + response = prediction_client.predict(request=request) print("Prediction results:") for result in response.payload: print("Predicted class name: {}".format(result.display_name)) diff --git a/samples/snippets/vision_object_detection_predict_test.py b/samples/snippets/vision_object_detection_predict_test.py index 24532663..6ba9af36 100644 --- a/samples/snippets/vision_object_detection_predict_test.py +++ b/samples/snippets/vision_object_detection_predict_test.py @@ -30,10 +30,10 @@ def verify_model_state(): client = automl.AutoMlClient() model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID) - model = client.get_model(model_full_id) - if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED: + model = client.get_model(name=model_full_id) + if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED: # Deploy model if it is not deployed - response = client.deploy_model(model_full_id) + response = client.deploy_model(name=model_full_id) response.result(600) # 10 minutes diff --git a/samples/tables/automl_tables_dataset.py b/samples/tables/automl_tables_dataset.py index 144f2ee6..a5d37945 100644 --- a/samples/tables/automl_tables_dataset.py +++ b/samples/tables/automl_tables_dataset.py @@ -47,30 +47,28 @@ def create_dataset(project_id, compute_region, dataset_display_name): print("Dataset metadata:") print("\t{}".format(dataset.tables_dataset_metadata)) print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) # [END automl_tables_create_dataset] return dataset -def list_datasets(project_id, compute_region, filter_=None): +def list_datasets(project_id, compute_region, filter=None): """List all datasets.""" result = [] # [START automl_tables_list_datasets] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' - # filter_ = 'filter expression here' + # filter = 'filter expression here' from google.cloud import automl_v1beta1 as automl client = automl.TablesClient(project=project_id, region=compute_region) # List all the datasets available in the region by applying filter. 
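    # In 2.x the keyword has no trailing underscore: `filter_` becomes plain
    # `filter`, which shadows the Python builtin inside these samples.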
- response = client.list_datasets(filter_=filter_) + response = client.list_datasets(filter=filter) print("List of datasets:") for dataset in response: @@ -105,9 +103,7 @@ def list_datasets(project_id, compute_region, filter_=None): ) ) print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) print("\n") # [END automl_tables_list_datasets] @@ -137,9 +133,7 @@ def get_dataset(project_id, compute_region, dataset_display_name): print("Dataset metadata:") print("\t{}".format(dataset.tables_dataset_metadata)) print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) + print("Dataset create time: {}".format(dataset.create_time)) return dataset diff --git a/samples/tables/automl_tables_model.py b/samples/tables/automl_tables_model.py index a77dfe62..b3193ff2 100644 --- a/samples/tables/automl_tables_model.py +++ b/samples/tables/automl_tables_model.py @@ -79,7 +79,7 @@ def get_operation_status(operation_full_id): client = automl.TablesClient() # Get the latest state of a long-running operation. - op = client.auto_ml_client.transport._operations_client.get_operation( + op = client.auto_ml_client._transport.operations_client.get_operation( operation_full_id ) @@ -88,27 +88,26 @@ def get_operation_status(operation_full_id): # [END automl_tables_get_operation_status] -def list_models(project_id, compute_region, filter_=None): +def list_models(project_id, compute_region, filter=None): """List all models.""" result = [] # [START automl_tables_list_models] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' - # filter_ = 'DATASET_DISPLAY_NAME_HERE' + # filter = 'DATASET_DISPLAY_NAME_HERE' from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums client = automl.TablesClient(project=project_id, region=compute_region) # List all the models available in the region by applying filter. - response = client.list_models(filter_=filter_) + response = client.list_models(filter=filter) print("List of models:") for model in response: # Retrieve deployment state. - if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state == automl.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" @@ -133,9 +132,7 @@ def list_models(project_id, compute_region, filter_=None): metadata.train_cost_milli_node_hours ) ) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) print("\n") @@ -154,7 +151,6 @@ def get_model(project_id, compute_region, model_display_name): # model_display_name = 'MODEL_DISPLAY_NAME_HERE' from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums client = automl.TablesClient(project=project_id, region=compute_region) @@ -162,7 +158,7 @@ def get_model(project_id, compute_region, model_display_name): model = client.get_model(model_display_name=model_display_name) # Retrieve deployment state. 
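    # In 2.x the separate `enums` module is gone; enum types are reached
    # through their owning message, e.g. automl.Model.DeploymentState.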
- if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state == automl.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" @@ -185,9 +181,7 @@ def get_model(project_id, compute_region, model_display_name): print("Features of top importance:") for feat in feat_list[:feat_to_show]: print(feat) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) + print("Model create time: {}".format(model.create_time)) print("Model deployment state: {}".format(deployment_state)) # [END automl_tables_get_model] @@ -196,7 +190,7 @@ def get_model(project_id, compute_region, model_display_name): def list_model_evaluations( - project_id, compute_region, model_display_name, filter_=None + project_id, compute_region, model_display_name, filter=None ): """List model evaluations.""" @@ -206,7 +200,7 @@ def list_model_evaluations( # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' # model_display_name = 'MODEL_DISPLAY_NAME_HERE' - # filter_ = 'filter expression here' + # filter = 'filter expression here' from google.cloud import automl_v1beta1 as automl @@ -214,7 +208,7 @@ def list_model_evaluations( # List all the model evaluations in the model by applying filter. response = client.list_model_evaluations( - model_display_name=model_display_name, filter_=filter_ + model_display_name=model_display_name, filter=filter ) print("List of model evaluations:") @@ -226,9 +220,7 @@ def list_model_evaluations( evaluation.evaluated_example_count ) ) - print("Model evaluation time:") - print("\tseconds: {}".format(evaluation.create_time.seconds)) - print("\tnanos: {}".format(evaluation.create_time.nanos)) + print("Model evaluation time: {}".format(evaluation.create_time)) print("\n") # [END automl_tables_list_model_evaluations] result.append(evaluation) @@ -252,9 +244,10 @@ def get_model_evaluation( client = automl.TablesClient() # Get the full path of the model evaluation. - model_evaluation_full_id = client.auto_ml_client.model_evaluation_path( - project_id, compute_region, model_id, model_evaluation_id + model_path = client.auto_ml_client.model_path( + project_id, compute_region, model_id ) + model_evaluation_full_id = f"{model_path}/modelEvaluations/{model_evaluation_id}" # Get complete detail of the model evaluation. response = client.get_model_evaluation( @@ -267,7 +260,7 @@ def get_model_evaluation( def display_evaluation( - project_id, compute_region, model_display_name, filter_=None + project_id, compute_region, model_display_name, filter=None ): """Display evaluation.""" # [START automl_tables_display_evaluation] @@ -275,7 +268,7 @@ def display_evaluation( # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' # model_display_name = 'MODEL_DISPLAY_NAME_HERE' - # filter_ = 'filter expression here' + # filter = 'filter expression here' from google.cloud import automl_v1beta1 as automl @@ -283,7 +276,7 @@ def display_evaluation( # List all the model evaluations in the model by applying filter. response = client.list_model_evaluations( - model_display_name=model_display_name, filter_=filter_ + model_display_name=model_display_name, filter=filter ) # Iterate through the results. 
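Taken together, the Tables hunks above reduce to two idioms: the `filter`
keyword loses its underscore, and enums hang off their owning message. A
minimal sketch (illustrative only; the project, region, and filter values
are placeholders, not part of this diff):

```py
from google.cloud import automl_v1beta1 as automl

client = automl.TablesClient(project="my-project", region="us-central1")

# Plain `filter` keyword and message-scoped enum, as migrated above.
for model in client.list_models(filter="display_name=my_model"):
    deployed = model.deployment_state == automl.Model.DeploymentState.DEPLOYED
    print(model.display_name, "deployed" if deployed else "undeployed")
```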
diff --git a/samples/tables/automl_tables_predict.py b/samples/tables/automl_tables_predict.py index 9787e1b9..a330213c 100644 --- a/samples/tables/automl_tables_predict.py +++ b/samples/tables/automl_tables_predict.py @@ -58,7 +58,7 @@ def predict( print("Prediction results:") for result in response.payload: print( - "Predicted class name: {}".format(result.tables.value.string_value) + "Predicted class name: {}".format(result.tables.value) ) print("Predicted class score: {}".format(result.tables.score)) diff --git a/samples/tables/batch_predict_test.py b/samples/tables/batch_predict_test.py index 203f4c8d..e3692cb6 100644 --- a/samples/tables/batch_predict_test.py +++ b/samples/tables/batch_predict_test.py @@ -16,7 +16,7 @@ import os -from google.cloud.automl_v1beta1.gapic import enums +from google.cloud.automl_v1beta1 import Model import pytest @@ -58,7 +58,7 @@ def test_batch_predict_bq(capsys): def ensure_model_online(): model = model_test.ensure_model_ready() - if model.deployment_state != enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state != Model.DeploymentState.DEPLOYED: automl_tables_model.deploy_model(PROJECT, REGION, model.display_name) return automl_tables_model.get_model(PROJECT, REGION, model.display_name) diff --git a/samples/tables/endpoint_test.py b/samples/tables/endpoint_test.py index 5a20aba5..6af6b8da 100644 --- a/samples/tables/endpoint_test.py +++ b/samples/tables/endpoint_test.py @@ -23,4 +23,4 @@ def test_client_creation(capsys): automl_tables_set_endpoint.create_client_with_endpoint(PROJECT) out, _ = capsys.readouterr() - assert "GRPCIterator" in out + assert "ListDatasetsPager" in out diff --git a/samples/tables/predict_test.py b/samples/tables/predict_test.py index d608e182..1da6bfc2 100644 --- a/samples/tables/predict_test.py +++ b/samples/tables/predict_test.py @@ -16,7 +16,7 @@ import os -from google.cloud.automl_v1beta1.gapic import enums +from google.cloud.automl_v1beta1 import Model import automl_tables_model import automl_tables_predict @@ -58,7 +58,7 @@ def test_predict(capsys): def ensure_model_online(): model = model_test.ensure_model_ready() - if model.deployment_state != enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state != Model.DeploymentState.DEPLOYED: automl_tables_model.deploy_model(PROJECT, REGION, model.display_name) return automl_tables_model.get_model(PROJECT, REGION, model.display_name) diff --git a/scripts/fixup_automl_v1_keywords.py b/scripts/fixup_automl_v1_keywords.py new file mode 100644 index 00000000..85b5fc68 --- /dev/null +++ b/scripts/fixup_automl_v1_keywords.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class automlCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_predict': ('name', 'input_config', 'output_config', 'params', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_model': ('parent', 'model', ), + 'delete_dataset': ('name', ), + 'delete_model': ('name', ), + 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ), + 'export_data': ('name', 'output_config', ), + 'export_model': ('name', 'output_config', ), + 'get_annotation_spec': ('name', ), + 'get_dataset': ('name', ), + 'get_model': ('name', ), + 'get_model_evaluation': ('name', ), + 'import_data': ('name', 'input_config', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', ), + 'predict': ('name', 'payload', 'params', ), + 'undeploy_model': ('name', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_model': ('model', 'update_mask', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=automlCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. 
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the automl client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates on a best-effort basis when converting positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also produce false
+      positives when an API method shares a name with another method.
+""")
+    parser.add_argument(
+        '-d',
+        '--input-directory',
+        required=True,
+        dest='input_dir',
+        help='the input directory to walk for python files to fix up',
+    )
+    parser.add_argument(
+        '-o',
+        '--output-directory',
+        required=True,
+        dest='output_dir',
+        help='the directory to output files fixed via un-flattening',
+    )
+    args = parser.parse_args()
+    input_dir = pathlib.Path(args.input_dir)
+    output_dir = pathlib.Path(args.output_dir)
+    if not input_dir.is_dir():
+        print(
+            f"input directory '{input_dir}' does not exist or is not a directory",
+            file=sys.stderr,
+        )
+        sys.exit(-1)
+
+    if not output_dir.is_dir():
+        print(
+            f"output directory '{output_dir}' does not exist or is not a directory",
+            file=sys.stderr,
+        )
+        sys.exit(-1)
+
+    if os.listdir(output_dir):
+        print(
+            f"output directory '{output_dir}' is not empty",
+            file=sys.stderr,
+        )
+        sys.exit(-1)
+
+    fix_files(input_dir, output_dir)
diff --git a/scripts/fixup_automl_v1beta1_keywords.py b/scripts/fixup_automl_v1beta1_keywords.py
new file mode 100644
index 00000000..1644607f
--- /dev/null
+++ b/scripts/fixup_automl_v1beta1_keywords.py
@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class automlCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_predict': ('name', 'input_config', 'output_config', 'params', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_model': ('parent', 'model', ), + 'delete_dataset': ('name', ), + 'delete_model': ('name', ), + 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ), + 'export_data': ('name', 'output_config', ), + 'export_evaluated_examples': ('name', 'output_config', ), + 'export_model': ('name', 'output_config', ), + 'get_annotation_spec': ('name', ), + 'get_column_spec': ('name', 'field_mask', ), + 'get_dataset': ('name', ), + 'get_model': ('name', ), + 'get_model_evaluation': ('name', ), + 'get_table_spec': ('name', 'field_mask', ), + 'import_data': ('name', 'input_config', ), + 'list_column_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_table_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ), + 'predict': ('name', 'payload', 'params', ), + 'undeploy_model': ('name', ), + 'update_column_spec': ('column_spec', 'update_mask', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_table_spec': ('table_spec', 'update_mask', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+            for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=automlCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the automl client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates on a best-effort basis when converting positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also produce false
+      positives when an API method shares a name with another method.
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index e6af67da..0d476b09 100644 --- a/setup.py +++ b/setup.py @@ -22,8 +22,9 @@ version = "1.0.1" release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", - 'enum34; python_version < "3.4"', + "google-api-core[grpc] >= 1.22.0, < 2.0.0dev", + "proto-plus >= 1.4.0", + "libcst >= 0.2.5", ] extras = { "pandas": ["pandas>=0.17.1"], @@ -37,7 +38,9 @@ readme = readme_file.read() packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] namespaces = ["google"] @@ -58,12 +61,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -72,7 +73,11 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", + scripts=[ + "scripts/fixup_automl_v1_keywords.py", + "scripts/fixup_automl_v1beta1_keywords.py", + ], include_package_data=True, zip_safe=False, ) diff --git a/synth.metadata b/synth.metadata index cf6467b0..6c3bc216 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,30 +3,30 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-automl.git", - "sha": "ec9cb308914fe2d5d19cf612c9a51a38ed77ee64" + "remote": "git@github.com:googleapis/python-automl", + "sha": "9b218b1f1cd0caef664e51064baf5f4af07a97c1" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "fb84629a56703d04f0b5304c4a9ade7313ebd92d", - "internalRef": "325339219" + "sha": "17de2b31f9450385e739bedeeaac6e1ec4f239a8", + "internalRef": "327504150" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "5f2f711c91199ba2f609d3f06a2fe22aee4e5be3" + "sha": "9602086c6c5b05db77950c7f7495a2a3868f3537" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "5f2f711c91199ba2f609d3f06a2fe22aee4e5be3" + "sha": "9602086c6c5b05db77950c7f7495a2a3868f3537" } } ], diff --git a/synth.py b/synth.py index 16b099ef..017d2a67 
100644 --- a/synth.py +++ b/synth.py @@ -36,114 +36,32 @@ include_protos=True ) - s.move(library / f"google/cloud/automl_{version}") - s.move(library / f"tests/unit/gapic/{version}") - s.move(library / f"docs/gapic/{version}") -s.move(library / f"docs/conf.py") - -# Use the highest version library to generate import alias. -s.move(library / "google/cloud/automl.py") + s.move(library, excludes=["README.rst", "docs/index.rst", "setup.py"]) # Add TablesClient and GcsClient to v1beta1 s.replace( - f"google/cloud/automl_v1beta1/__init__.py", - f"from google.cloud.automl_v1beta1.gapic import prediction_service_client", - f"from google.cloud.automl_v1beta1.gapic import prediction_service_client\n" - f"from google.cloud.automl_v1beta1.tables import tables_client\n" - f"from google.cloud.automl_v1beta1.tables import gcs_client" - f"\n\n" - f"class TablesClient(tables_client.TablesClient):" - f" __doc__ = tables_client.TablesClient.__doc__" - f"\n\nclass GcsClient(gcs_client.GcsClient):" - f" __doc__ = gcs_client.GcsClient.__doc__", +f"google/cloud/automl_v1beta1/__init__.py", +"""from \.services\.auto_ml import AutoMlClient +from \.services\.prediction_service import PredictionServiceClient""", +"""from .services.auto_ml import AutoMlClient +from .services.prediction_service import PredictionServiceClient +from .tables.gcs_client import GcsClient +from .tables.tables_client import TablesClient""" ) s.replace( f"google/cloud/automl_v1beta1/__init__.py", - f"""__all__ = \( - 'enums', - 'types', - 'AutoMlClient', - 'PredictionServiceClient', -\)""", - f'__all__ = ("enums", "types", "AutoMlClient", "PredictionServiceClient", "TablesClient", "GcsClient")', -) - -# Fixup issues in generated code -s.replace( - "**/gapic/*_client.py", - r"metadata_type=operations_pb2.OperationMetadata", - r"metadata_type=proto_operations_pb2.OperationMetadata", -) - -# Fix spacing/'::' issues in docstrings -s.replace( - "google/cloud/automl_v1beta1/gapic/prediction_service_client.py", "^\s+::", "" + f"""__all__ = \(""", + """__all__ = ("GcsClient", "TablesClient",""" ) -s.replace( - "google/cloud/automl_v1beta1/gapic/auto_ml_client.py", - "^(\s+)(::)\n\n\s+?([^\s])", - " \g<1>\g<2>\n \g<1>\g<3>", -) -# Remove 'raw-latex' sections with sample JSON Lines files -s.replace( - "google/cloud/**/io_pb2.py", - r"""Sample in-line - JSON Lines file.*?\}`\n""", - "\n", - flags=re.DOTALL, -) - -# Remove 'raw-latex' sections with sample JSON Lines files -s.replace( - "google/cloud/**/io_pb2.py", - r"""Sample - in-line JSON Lines.*?(\n\s+-\s+For Text Classification.*\n)""", - "\g<1>", - flags=re.DOTALL, -) - - -s.replace("google/cloud/**/io_pb2.py", r":raw-latex:`\\t `", r"\\\\t") - -# Remove html bits that can't be rendered correctly -s.replace( - "google/cloud/automl_v1/**/io_pb2.py", - r""".. raw:: html.+? 
- \""", - r"", - flags=re.DOTALL, -) - -# Remove raw-latex wrapping newline -s.replace("google/cloud/automl_v1/**/io_pb2.py", r""":raw-latex:`\\n`""", r"``\\\\n``") - -# Make \n visible in JSONL samples -s.replace("google/cloud/**/io_pb2.py", r"\}\\n", r"}\\\\n") - -# properly escape emphasis -s.replace("google/cloud/**/*.py", -"""image_classification_dataset_metadata:\*""", -"""``image_classification_dataset_metadata``""") - -s.replace("google/cloud/**/*.py", -"""video_classification_model_metadata:\*""", -"""``video_classification_model_metadata:*``""") - -# Escape '_' at the end of the line in pb2 docstrings -s.replace( -"google/cloud/**/*_pb2.py", -"""\_$""", -"""\_""", -) # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library( - unit_cov_level=82, cov_level=83, samples=True + unit_cov_level=82, cov_level=83, samples=True, microgenerator=True ) python.py_samples(skip_readmes=True) diff --git a/tests/system/gapic/v1beta1/test_system_tables_client_v1.py b/tests/system/gapic/v1beta1/test_system_tables_client_v1.py index 27f2e884..6092c43a 100644 --- a/tests/system/gapic/v1beta1/test_system_tables_client_v1.py +++ b/tests/system/gapic/v1beta1/test_system_tables_client_v1.py @@ -24,7 +24,6 @@ from google.cloud import automl_v1beta1 from google.api_core import exceptions -from google.cloud.automl_v1beta1.gapic import enums from test_utils.vpcsc_config import vpcsc_config @@ -270,7 +269,7 @@ def test_online_predict(self): def ensure_model_online(self, client): model = self.ensure_model_ready(client) - if model.deployment_state != enums.Model.DeploymentState.DEPLOYED: + if model.deployment_state != automl_v1beta1.Model.DeploymentState.DEPLOYED: client.deploy_model(model=model).result() return client.get_model(model_name=model.name) diff --git a/tests/unit/gapic/automl_v1/__init__.py b/tests/unit/gapic/automl_v1/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tests/unit/gapic/automl_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/automl_v1/test_auto_ml.py b/tests/unit/gapic/automl_v1/test_auto_ml.py new file mode 100644 index 00000000..42ad394e --- /dev/null +++ b/tests/unit/gapic/automl_v1/test_auto_ml.py @@ -0,0 +1,5000 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1.services.auto_ml import AutoMlAsyncClient +from google.cloud.automl_v1.services.auto_ml import AutoMlClient +from google.cloud.automl_v1.services.auto_ml import pagers +from google.cloud.automl_v1.services.auto_ml import transports +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import classification +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import service +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import text_extraction +from google.cloud.automl_v1.types import text_sentiment +from google.cloud.automl_v1.types import translation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoMlClient._get_default_mtls_endpoint(None) is None + assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + ) + assert ( + AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient]) +def test_auto_ml_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "automl.googleapis.com:443" + + +def test_auto_ml_client_get_transport_class(): + transport = AutoMlClient.get_transport_class() + assert transport == transports.AutoMlGrpcTransport + + transport = AutoMlClient.get_transport_class("grpc") + assert transport == transports.AutoMlGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient) +) +@mock.patch.object( + AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient) +) +def test_auto_ml_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AutoMlClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoMlClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". 
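+    # GOOGLE_API_USE_MTLS accepts "never", "auto", and "always"; any other
+    # value raises MutualTLSChannelError (exercised further below).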
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_auto_ml_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_auto_ml_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_auto_ml_client_client_options_from_dict(): + with mock.patch( + "google.cloud.automl_v1.services.auto_ml.transports.AutoMlGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = AutoMlClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_dataset( + transport: str = "grpc", request_type=service.CreateDatasetRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
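+    # Patching the transport method's __call__ intercepts the request before
+    # it reaches gRPC, so the test needs no real credentials or network.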
+ with mock.patch.object(type(client._transport.create_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_from_dict(): + test_create_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.CreateDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_dataset), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + +def test_create_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + service.CreateDatasetRequest(), + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_dataset( + service.CreateDatasetRequest(), + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + +def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ), + ) + + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_get_dataset_from_dict(): + test_get_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + ) + ) + + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_get_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = service.GetDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_dataset), "__call__") as call: + call.return_value = dataset.Dataset() + + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + service.GetDatasetRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
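+        # The flattened "name" keyword should have been folded into the
+        # request object that reached the transport.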
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_dataset( + service.GetDatasetRequest(), name="name_value", + ) + + +def test_list_datasets( + transport: str = "grpc", request_type=service.ListDatasetsRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_datasets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_datasets_from_dict(): + test_list_datasets(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListDatasetsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_datasets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListDatasetsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_datasets_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_datasets), "__call__") as call: + call.return_value = service.ListDatasetsResponse() + + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_datasets), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListDatasetsResponse() + ) + + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_datasets_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_datasets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_datasets_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + service.ListDatasetsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_datasets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListDatasetsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_datasets(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_datasets_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        await client.list_datasets(
+            service.ListDatasetsRequest(), parent="parent_value",
+        )
+
+
+def test_list_datasets_pager():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_datasets(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, dataset.Dataset) for i in results)
+
+
+def test_list_datasets_pages():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_datasets(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pager():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_datasets),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_datasets(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, dataset.Dataset) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pages():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
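+    # new_callable=mock.AsyncMock makes the patched transport method awaitable.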
+ with mock.patch.object( + type(client._client._transport.list_datasets), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),], + next_page_token="abc", + ), + service.ListDatasetsResponse(datasets=[], next_page_token="def",), + service.ListDatasetsResponse( + datasets=[dataset.Dataset(),], next_page_token="ghi", + ), + service.ListDatasetsResponse( + datasets=[dataset.Dataset(), dataset.Dataset(),], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_datasets(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_update_dataset( + transport: str = "grpc", request_type=service.UpdateDatasetRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ), + ) + + response = client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_update_dataset_from_dict(): + test_update_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.UpdateDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + ) + ) + + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
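+        # Unlike create/delete, update_dataset returns the mutated Dataset
+        # directly rather than a long-running operation.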
+ assert isinstance(response, gca_dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_update_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + request.dataset.name = "dataset.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_dataset), "__call__") as call: + call.return_value = gca_dataset.Dataset() + + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + request.dataset.name = "dataset.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[ + "metadata" + ] + + +def test_update_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_dataset( + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_dataset( + transport: str = "grpc", request_type=service.DeleteDatasetRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. 
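+        # (delete_dataset is a long-running operation; the future wrapper is
+        # asserted below)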
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_from_dict(): + test_delete_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.DeleteDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + service.DeleteDatasetRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_dataset(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_dataset( + service.DeleteDatasetRequest(), name="name_value", + ) + + +def test_import_data(transport: str = "grpc", request_type=service.ImportDataRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.import_data), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ImportDataRequest() + + # Establish that the response is the type that we expect. 
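+        # import_data likewise surfaces as a long-running operation future.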
+ assert isinstance(response, future.Future) + + +def test_import_data_from_dict(): + test_import_data(request_type=dict) + + +@pytest.mark.asyncio +async def test_import_data_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ImportDataRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.import_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.import_data), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.import_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_import_data_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.import_data), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.import_data( + name="name_value", + input_config=io.InputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].input_config == io.InputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ) + + +def test_import_data_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + service.ImportDataRequest(), + name="name_value", + input_config=io.InputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + ) + + +@pytest.mark.asyncio +async def test_import_data_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.import_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_data( + name="name_value", + input_config=io.InputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].input_config == io.InputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ) + + +@pytest.mark.asyncio +async def test_import_data_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_data( + service.ImportDataRequest(), + name="name_value", + input_config=io.InputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + ) + + +def test_export_data(transport: str = "grpc", request_type=service.ExportDataRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_data), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_export_data_from_dict(): + test_export_data(request_type=dict) + + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ExportDataRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_data), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_export_data_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_data), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.export_data( + name="name_value", + output_config=io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + +def test_export_data_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + service.ExportDataRequest(), + name="name_value", + output_config=io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_export_data_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_data( + name="name_value", + output_config=io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + +@pytest.mark.asyncio +async def test_export_data_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_data( + service.ExportDataRequest(), + name="name_value", + output_config=io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + +def test_get_annotation_spec( + transport: str = "grpc", request_type=service.GetAnnotationSpecRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_annotation_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec( + name="name_value", display_name="display_name_value", example_count=1396, + ) + + response = client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.example_count == 1396 + + +def test_get_annotation_spec_from_dict(): + test_get_annotation_spec(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetAnnotationSpecRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_annotation_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + annotation_spec.AnnotationSpec( + name="name_value", + display_name="display_name_value", + example_count=1396, + ) + ) + + response = await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.example_count == 1396 + + +def test_get_annotation_spec_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_annotation_spec), "__call__" + ) as call: + call.return_value = annotation_spec.AnnotationSpec() + + client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_annotation_spec_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_annotation_spec), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + annotation_spec.AnnotationSpec() + ) + + await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_annotation_spec_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_annotation_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_annotation_spec(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_annotation_spec_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_annotation_spec( + service.GetAnnotationSpecRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_annotation_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + annotation_spec.AnnotationSpec() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_annotation_spec(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_annotation_spec( + service.GetAnnotationSpecRequest(), name="name_value", + ) + + +def test_create_model(transport: str = "grpc", request_type=service.CreateModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_model(request) + + # Establish that the underlying gRPC stub method was called. 
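+        # (model training is started as a long-running operation; the future
+        # wrapper is asserted below)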
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_from_dict(): + test_create_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.CreateModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_model_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_model), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_model( + parent="parent_value", + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].model == gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ) + + +def test_create_model_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model( + service.CreateModelRequest(), + parent="parent_value", + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_create_model_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_model( + parent="parent_value", + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].model == gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ) + + +@pytest.mark.asyncio +async def test_create_model_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_model( + service.CreateModelRequest(), + parent="parent_value", + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + ) + + +def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. 
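+        # Populate every scalar field so the response assertions below have
+        # concrete values to compare against.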
+ call.return_value = model.Model( + name="name_value", + display_name="display_name_value", + dataset_id="dataset_id_value", + deployment_state=model.Model.DeploymentState.DEPLOYED, + etag="etag_value", + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ), + ) + + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.dataset_id == "dataset_id_value" + + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + assert response.etag == "etag_value" + + +def test_get_model_from_dict(): + test_get_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model.Model( + name="name_value", + display_name="display_name_value", + dataset_id="dataset_id_value", + deployment_state=model.Model.DeploymentState.DEPLOYED, + etag="etag_value", + ) + ) + + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.dataset_id == "dataset_id_value" + + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + assert response.etag == "etag_value" + + +def test_get_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_model), "__call__") as call: + call.return_value = model.Model() + + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
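+ # The value set on request.name below should be mirrored into the
+ # x-goog-request-params metadata so the backend can route the call.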
+ request = service.GetModelRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.get_model), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
+
+ await client.get_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ def test_get_model_flattened():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = model.Model()
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+ def test_get_model_flattened_error():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_model(
+ service.GetModelRequest(), name="name_value",
+ )
+
+
+ @pytest.mark.asyncio
+ async def test_get_model_flattened_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.get_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+ @pytest.mark.asyncio
+ async def test_get_model_flattened_error_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_model(
+ service.GetModelRequest(), name="name_value",
+ )
+
+
+ def test_list_models(transport: str = "grpc", request_type=service.ListModelsRequest):
+ client = AutoMlClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_models_from_dict(): + test_list_models(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_models_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListModelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_models_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_models), "__call__") as call: + call.return_value = service.ListModelsResponse() + + client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_models_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_models), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelsResponse() + ) + + await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+ def test_list_models_flattened():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = service.ListModelsResponse()
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_models(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].parent == "parent_value"
+
+
+ def test_list_models_flattened_error():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_models(
+ service.ListModelsRequest(), parent="parent_value",
+ )
+
+
+ @pytest.mark.asyncio
+ async def test_list_models_flattened_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.list_models), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ service.ListModelsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_models(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].parent == "parent_value"
+
+
+ @pytest.mark.asyncio
+ async def test_list_models_flattened_error_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_models(
+ service.ListModelsRequest(), parent="parent_value",
+ )
+
+
+ def test_list_models_pager():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ # Set the response to a series of pages.
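+ # Four pages (3 + 0 + 1 + 2 models), then a RuntimeError sentinel that
+ # fails the test if the pager fetches past the page with an empty token.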
+ call.side_effect = (
+ service.ListModelsResponse(
+ model=[model.Model(), model.Model(), model.Model(),],
+ next_page_token="abc",
+ ),
+ service.ListModelsResponse(model=[], next_page_token="def",),
+ service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",),
+ service.ListModelsResponse(model=[model.Model(), model.Model(),],),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_models(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, model.Model) for i in results)
+
+
+ def test_list_models_pages():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ service.ListModelsResponse(
+ model=[model.Model(), model.Model(), model.Model(),],
+ next_page_token="abc",
+ ),
+ service.ListModelsResponse(model=[], next_page_token="def",),
+ service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",),
+ service.ListModelsResponse(model=[model.Model(), model.Model(),],),
+ RuntimeError,
+ )
+ pages = list(client.list_models(request={}).pages)
+ for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page.raw_page.next_page_token == token
+
+
+ @pytest.mark.asyncio
+ async def test_list_models_async_pager():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.list_models),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ service.ListModelsResponse(
+ model=[model.Model(), model.Model(), model.Model(),],
+ next_page_token="abc",
+ ),
+ service.ListModelsResponse(model=[], next_page_token="def",),
+ service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",),
+ service.ListModelsResponse(model=[model.Model(), model.Model(),],),
+ RuntimeError,
+ )
+ async_pager = await client.list_models(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, model.Model) for i in responses)
+
+
+ @pytest.mark.asyncio
+ async def test_list_models_async_pages():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.list_models),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
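+ # Same page series as above; here the .pages of the awaited pager must
+ # be consumed with `async for`.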
+ call.side_effect = ( + service.ListModelsResponse( + model=[model.Model(), model.Model(), model.Model(),], + next_page_token="abc", + ), + service.ListModelsResponse(model=[], next_page_token="def",), + service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",), + service.ListModelsResponse(model=[model.Model(), model.Model(),],), + RuntimeError, + ) + pages = [] + async for page in (await client.list_models(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_from_dict(): + test_delete_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.DeleteModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
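+ # Routing headers are expected even for long-running methods such as
+ # delete_model.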
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ @pytest.mark.asyncio
+ async def test_delete_model_field_headers_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = service.DeleteModelRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.delete_model), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+
+ await client.delete_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ def test_delete_model_flattened():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+ def test_delete_model_flattened_error():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_model(
+ service.DeleteModelRequest(), name="name_value",
+ )
+
+
+ @pytest.mark.asyncio
+ async def test_delete_model_flattened_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.delete_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+ @pytest.mark.asyncio
+ async def test_delete_model_flattened_error_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
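+ # The async surface enforces the same rule as the sync client: a request
+ # object and flattened keyword fields are mutually exclusive.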
+ with pytest.raises(ValueError): + await client.delete_model( + service.DeleteModelRequest(), name="name_value", + ) + + +def test_update_model(transport: str = "grpc", request_type=service.UpdateModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model( + name="name_value", + display_name="display_name_value", + dataset_id="dataset_id_value", + deployment_state=gca_model.Model.DeploymentState.DEPLOYED, + etag="etag_value", + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ), + ) + + response = client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.dataset_id == "dataset_id_value" + + assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED + + assert response.etag == "etag_value" + + +def test_update_model_from_dict(): + test_update_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.UpdateModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model.Model( + name="name_value", + display_name="display_name_value", + dataset_id="dataset_id_value", + deployment_state=gca_model.Model.DeploymentState.DEPLOYED, + etag="etag_value", + ) + ) + + response = await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.dataset_id == "dataset_id_value" + + assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED + + assert response.etag == "etag_value" + + +def test_update_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
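+ # update_model routes on a nested field, so the header is derived from
+ # request.model.name rather than a top-level field.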
+ request = service.UpdateModelRequest() + request.model.name = "model.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_model), "__call__") as call: + call.return_value = gca_model.Model() + + client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "model.name=model.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateModelRequest() + request.model.name = "model.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + + await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "model.name=model.name/value",) in kw["metadata"] + + +def test_update_model_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_model( + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].model == gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ) + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_model_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + service.UpdateModelRequest(), + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_model_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
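+ # The async client delegates to an inner sync client, so the transport
+ # to patch lives at client._client._transport.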
+ with mock.patch.object(
+ type(client._client._transport.update_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_model(
+ model=gca_model.Model(
+ translation_model_metadata=translation.TranslationModelMetadata(
+ base_model="base_model_value"
+ )
+ ),
+ update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].model == gca_model.Model(
+ translation_model_metadata=translation.TranslationModelMetadata(
+ base_model="base_model_value"
+ )
+ )
+
+ assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+
+
+ @pytest.mark.asyncio
+ async def test_update_model_flattened_error_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_model(
+ service.UpdateModelRequest(),
+ model=gca_model.Model(
+ translation_model_metadata=translation.TranslationModelMetadata(
+ base_model="base_model_value"
+ )
+ ),
+ update_mask=field_mask.FieldMask(paths=["paths_value"]),
+ )
+
+
+ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelRequest):
+ client = AutoMlClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+
+ response = client.deploy_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == service.DeployModelRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+ def test_deploy_model_from_dict():
+ test_deploy_model(request_type=dict)
+
+
+ @pytest.mark.asyncio
+ async def test_deploy_model_async(transport: str = "grpc_asyncio"):
+ client = AutoMlAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = service.DeployModelRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.deploy_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+
+ response = await client.deploy_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.deploy_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.deploy_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_deploy_model_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.deploy_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_model(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_deploy_model_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_model( + service.DeployModelRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_deploy_model_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.deploy_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
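+ # The awaited call must resolve to an awaitable, hence the
+ # FakeUnaryUnaryCall wrapper around the Operation below.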
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.deploy_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+ @pytest.mark.asyncio
+ async def test_deploy_model_flattened_error_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.deploy_model(
+ service.DeployModelRequest(), name="name_value",
+ )
+
+
+ def test_undeploy_model(
+ transport: str = "grpc", request_type=service.UndeployModelRequest
+ ):
+ client = AutoMlClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+
+ response = client.undeploy_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == service.UndeployModelRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+ def test_undeploy_model_from_dict():
+ test_undeploy_model(request_type=dict)
+
+
+ @pytest.mark.asyncio
+ async def test_undeploy_model_async(transport: str = "grpc_asyncio"):
+ client = AutoMlAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = service.UndeployModelRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.undeploy_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+
+ response = await client.undeploy_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+ def test_undeploy_model_field_headers():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = service.UndeployModelRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ client.undeploy_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ @pytest.mark.asyncio
+ async def test_undeploy_model_field_headers_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = service.UndeployModelRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.undeploy_model), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+
+ await client.undeploy_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ def test_undeploy_model_flattened():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.undeploy_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+ def test_undeploy_model_flattened_error():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.undeploy_model(
+ service.UndeployModelRequest(), name="name_value",
+ )
+
+
+ @pytest.mark.asyncio
+ async def test_undeploy_model_flattened_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.undeploy_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.undeploy_model(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
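+ # The flattened `name` kwarg should have been folded into a single
+ # UndeployModelRequest before reaching the transport.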
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_model( + service.UndeployModelRequest(), name="name_value", + ) + + +def test_export_model(transport: str = "grpc", request_type=service.ExportModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_from_dict(): + test_export_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_export_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ExportModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ @pytest.mark.asyncio
+ async def test_export_model_field_headers_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = service.ExportModelRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.export_model), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+
+ await client.export_model(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+ def test_export_model_flattened():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.export_model(
+ name="name_value",
+ output_config=io.ModelExportOutputConfig(
+ gcs_destination=io.GcsDestination(
+ output_uri_prefix="output_uri_prefix_value"
+ )
+ ),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+ assert args[0].output_config == io.ModelExportOutputConfig(
+ gcs_destination=io.GcsDestination(
+ output_uri_prefix="output_uri_prefix_value"
+ )
+ )
+
+
+ def test_export_model_flattened_error():
+ client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.export_model(
+ service.ExportModelRequest(),
+ name="name_value",
+ output_config=io.ModelExportOutputConfig(
+ gcs_destination=io.GcsDestination(
+ output_uri_prefix="output_uri_prefix_value"
+ )
+ ),
+ )
+
+
+ @pytest.mark.asyncio
+ async def test_export_model_flattened_async():
+ client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client._client._transport.export_model), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
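+ # Nested messages such as output_config are accepted as flattened
+ # kwargs too and should land on the request unchanged.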
+ response = await client.export_model( + name="name_value", + output_config=io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + +@pytest.mark.asyncio +async def test_export_model_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_model( + service.ExportModelRequest(), + name="name_value", + output_config=io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + +def test_get_model_evaluation( + transport: str = "grpc", request_type=service.GetModelEvaluationRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation( + name="name_value", + annotation_spec_id="annotation_spec_id_value", + display_name="display_name_value", + evaluated_example_count=2446, + classification_evaluation_metrics=classification.ClassificationEvaluationMetrics( + au_prc=0.634 + ), + ) + + response = client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + + assert response.name == "name_value" + + assert response.annotation_spec_id == "annotation_spec_id_value" + + assert response.display_name == "display_name_value" + + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_from_dict(): + test_get_model_evaluation(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetModelEvaluationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
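+ # get_model_evaluation is a plain unary call rather than a long-running
+ # operation, so the fake call resolves directly to the response message.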
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_evaluation.ModelEvaluation( + name="name_value", + annotation_spec_id="annotation_spec_id_value", + display_name="display_name_value", + evaluated_example_count=2446, + ) + ) + + response = await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + + assert response.name == "name_value" + + assert response.annotation_spec_id == "annotation_spec_id_value" + + assert response.display_name == "display_name_value" + + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_model_evaluation), "__call__" + ) as call: + call.return_value = model_evaluation.ModelEvaluation() + + client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model_evaluation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_evaluation.ModelEvaluation() + ) + + await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_model_evaluation_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_model_evaluation_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation( + service.GetModelEvaluationRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_evaluation.ModelEvaluation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model_evaluation( + service.GetModelEvaluationRequest(), name="name_value", + ) + + +def test_list_model_evaluations( + transport: str = "grpc", request_type=service.ListModelEvaluationsRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_evaluations_from_dict(): + test_list_model_evaluations(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListModelEvaluationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
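+    # The async client delegates to an inner client, so the transport is
+    # reached through client._client._transport rather than client._transport.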
+ with mock.patch.object( + type(client._client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelEvaluationsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_evaluations_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + call.return_value = service.ListModelEvaluationsResponse() + + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelEvaluationsResponse() + ) + + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_model_evaluations_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluations( + parent="parent_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
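+        # Both flattened fields should land on the request object unchanged.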
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].filter == "filter_value" + + +def test_list_model_evaluations_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluations( + service.ListModelEvaluationsRequest(), + parent="parent_value", + filter="filter_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelEvaluationsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_evaluations( + parent="parent_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].filter == "filter_value" + + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_evaluations( + service.ListModelEvaluationsRequest(), + parent="parent_value", + filter="filter_value", + ) + + +def test_list_model_evaluations_pager(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token="abc", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], next_page_token="def", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[model_evaluation.ModelEvaluation(),], + next_page_token="ghi", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_model_evaluations(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in results) + + +def test_list_model_evaluations_pages(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. 
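+    # Each element of side_effect is returned by one successive stub call; the
+    # trailing RuntimeError guards against fetching past the final page.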
+ with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token="abc", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], next_page_token="def", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[model_evaluation.ModelEvaluation(),], + next_page_token="ghi", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluations(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pager(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token="abc", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], next_page_token="def", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[model_evaluation.ModelEvaluation(),], + next_page_token="ghi", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluations(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pages(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
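+        # As in the sync variant: one response per stub call, with a trailing
+        # RuntimeError in case the pager over-fetches.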
+ call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token="abc", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], next_page_token="def", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[model_evaluation.ModelEvaluation(),], + next_page_token="ghi", + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_model_evaluations(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = AutoMlClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoMlGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.AutoMlGrpcTransport,) + + +def test_auto_ml_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.AutoMlTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_auto_ml_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.AutoMlTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
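+    # The base transport is only an interface; concrete transports such as the
+    # gRPC ones override each of these methods.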
+ methods = ( + "create_dataset", + "get_dataset", + "list_datasets", + "update_dataset", + "delete_dataset", + "import_data", + "export_data", + "get_annotation_spec", + "create_model", + "get_model", + "list_models", + "delete_model", + "update_model", + "deploy_model", + "undeploy_model", + "export_model", + "get_model_evaluation", + "list_model_evaluations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_auto_ml_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.AutoMlTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_auto_ml_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + AutoMlClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_auto_ml_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.AutoMlGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_auto_ml_host_no_port(): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="automl.googleapis.com" + ), + ) + assert client._transport._host == "automl.googleapis.com:443" + + +def test_auto_ml_host_with_port(): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="automl.googleapis.com:8000" + ), + ) + assert client._transport._host == "automl.googleapis.com:8000" + + +def test_auto_ml_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_auto_ml_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
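+    # A caller-supplied channel takes precedence, so the certificate callback
+    # must never be invoked.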
+ callback = mock.MagicMock() + transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_auto_ml_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_auto_ml_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_auto_ml_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
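+    # Patching __init__ and the ssl_credentials property lets the test inject
+    # mock SSL credentials without loading real ADC certificates.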
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_auto_ml_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_auto_ml_grpc_lro_client(): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_auto_ml_grpc_lro_async_client(): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + actual = AutoMlClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = AutoMlClient.model_path(**expected) + + # Check that the path construction is reversible. 
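+    # parse_model_path is the inverse of model_path: round-tripping the path
+    # should recover the original components.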
+ actual = AutoMlClient.parse_model_path(path) + assert expected == actual + + +def test_dataset_path(): + project = "squid" + location = "clam" + dataset = "whelk" + + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, + ) + actual = AutoMlClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", + } + path = AutoMlClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_dataset_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.AutoMlTransport, "_prep_wrapped_messages" + ) as prep: + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.AutoMlTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = AutoMlClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/automl_v1/test_prediction_service.py b/tests/unit/gapic/automl_v1/test_prediction_service.py new file mode 100644 index 00000000..a0087eae --- /dev/null +++ b/tests/unit/gapic/automl_v1/test_prediction_service.py @@ -0,0 +1,1234 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from google import auth
+from google.api_core import client_options
+from google.api_core import exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation_async
+from google.api_core import operations_v1
+from google.auth import credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.automl_v1.services.prediction_service import (
+    PredictionServiceAsyncClient,
+)
+from google.cloud.automl_v1.services.prediction_service import PredictionServiceClient
+from google.cloud.automl_v1.services.prediction_service import transports
+from google.cloud.automl_v1.types import annotation_payload
+from google.cloud.automl_v1.types import data_items
+from google.cloud.automl_v1.types import geometry
+from google.cloud.automl_v1.types import io
+from google.cloud.automl_v1.types import operations
+from google.cloud.automl_v1.types import prediction_service
+from google.cloud.automl_v1.types import text_segment
+from google.longrunning import operations_pb2
+from google.oauth2 import service_account
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    return (
+        "foo.googleapis.com"
+        if ("localhost" in client.DEFAULT_ENDPOINT)
+        else client.DEFAULT_ENDPOINT
+    )
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert PredictionServiceClient._get_default_mtls_endpoint(None) is None
+    assert (
+        PredictionServiceClient._get_default_mtls_endpoint(api_endpoint)
+        == api_mtls_endpoint
+    )
+    assert (
+        PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
+        == api_mtls_endpoint
+    )
+    assert (
+        PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
+        == sandbox_mtls_endpoint
+    )
+    assert (
+        PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+        == sandbox_mtls_endpoint
+    )
+    assert (
+        PredictionServiceClient._get_default_mtls_endpoint(non_googleapi)
+        == non_googleapi
+    )
+
+
+@pytest.mark.parametrize(
+    "client_class", [PredictionServiceClient, PredictionServiceAsyncClient]
+)
+def test_prediction_service_client_from_service_account_file(client_class):
+    creds = credentials.AnonymousCredentials()
+    with mock.patch.object(
+        service_account.Credentials, "from_service_account_file"
+    ) as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json")
+        assert client._transport._credentials == creds
+
+        client = client_class.from_service_account_json("dummy/file/path.json")
+        assert client._transport._credentials == creds
+
+        assert client._transport._host == "automl.googleapis.com:443"
+
+
+def test_prediction_service_client_get_transport_class():
+    transport = PredictionServiceClient.get_transport_class()
+    assert transport == transports.PredictionServiceGrpcTransport
+
+    transport = PredictionServiceClient.get_transport_class("grpc")
+    assert transport == transports.PredictionServiceGrpcTransport
+
+
+@pytest.mark.parametrize(
+    "client_class,transport_class,transport_name",
+    [
+        (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
+        (
+            PredictionServiceAsyncClient,
+            transports.PredictionServiceGrpcAsyncIOTransport,
+            "grpc_asyncio",
+        ),
+    ],
+)
+@mock.patch.object(
+    PredictionServiceClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(PredictionServiceClient),
+)
+@mock.patch.object(
+    PredictionServiceAsyncClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(PredictionServiceAsyncClient),
+)
+def test_prediction_service_client_client_options(
+    client_class, transport_class, transport_name
+):
+    # Check that if channel is provided we won't create a new one.
+    with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
+        transport = transport_class(credentials=credentials.AnonymousCredentials())
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if channel is provided via str we will create a new one.
+    with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, "__init__") as patched:
+        patched.return_value = None
+        client = client_class(client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            api_mtls_endpoint="squid.clam.whelk",
+            client_cert_source=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}):
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                api_mtls_endpoint=client.DEFAULT_ENDPOINT,
+                client_cert_source=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}):
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
+                client_cert_source=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
+    # "auto", and client_cert_source is provided.
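+    # With "auto", the mTLS endpoint is chosen only when a client certificate
+    # is actually available, starting with an explicitly provided source.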
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + ( + PredictionServiceAsyncClient, + transports.PredictionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_prediction_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
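+    # Scopes passed through client_options should be forwarded verbatim to the
+    # transport constructor.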
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + ( + PredictionServiceAsyncClient, + transports.PredictionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_prediction_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_predict( + transport: str = "grpc", request_type=prediction_service.PredictRequest +): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_from_dict(): + test_predict(request_type=dict) + + +@pytest.mark.asyncio +async def test_predict_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = prediction_service.PredictRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + prediction_service.PredictResponse() + ) + + response = await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_field_headers(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.predict), "__call__") as call: + call.return_value = prediction_service.PredictResponse() + + client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.predict), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + prediction_service.PredictResponse() + ) + + await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_predict_flattened(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.predict( + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
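+        # params is a proto map field; the captured value should still compare
+        # equal to the plain dict that was passed in.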
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].payload == data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ) + + assert args[0].params == {"key_value": "value_value"} + + +def test_predict_flattened_error(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + +@pytest.mark.asyncio +async def test_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + prediction_service.PredictResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.predict( + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].payload == data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ) + + assert args[0].params == {"key_value": "value_value"} + + +@pytest.mark.asyncio +async def test_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + +def test_batch_predict( + transport: str = "grpc", request_type=prediction_service.BatchPredictRequest +): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.batch_predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == prediction_service.BatchPredictRequest() + + # Establish that the response is the type that we expect. 
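+    # batch_predict starts a long-running operation, so the client wraps the
+    # raw Operation proto in an api-core future rather than returning it as-is.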
+ assert isinstance(response, future.Future) + + +def test_batch_predict_from_dict(): + test_batch_predict(request_type=dict) + + +@pytest.mark.asyncio +async def test_batch_predict_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = prediction_service.BatchPredictRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.batch_predict), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_predict_field_headers(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.batch_predict), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.batch_predict), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_batch_predict_flattened(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.batch_predict), "__call__") as call: + # Designate an appropriate return value for the call. 
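+        # Only the captured request matters here; any Operation proto will do
+        # as the stub's return value.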
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_predict( + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].input_config == io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ) + + assert args[0].output_config == io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + assert args[0].params == {"key_value": "value_value"} + + +def test_batch_predict_flattened_error(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_predict( + prediction_service.BatchPredictRequest(), + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + +@pytest.mark.asyncio +async def test_batch_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.batch_predict), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_predict( + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].input_config == io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ) + + assert args[0].output_config == io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + assert args[0].params == {"key_value": "value_value"} + + +@pytest.mark.asyncio +async def test_batch_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_predict( + prediction_service.BatchPredictRequest(), + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
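+    # With no transport argument, the client should fall back to the gRPC
+    # transport returned by get_transport_class.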
+ client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.PredictionServiceGrpcTransport,) + + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "predict", + "batch_predict", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_prediction_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
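+    # ADC = Application Default Credentials; the transport resolves them via
+    # ``google.auth.default()``, which the patch below intercepts.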
+    with mock.patch.object(auth, "default") as adc:
+        adc.return_value = (credentials.AnonymousCredentials(), None)
+        transports.PredictionServiceGrpcTransport(
+            host="squid.clam.whelk", quota_project_id="octopus"
+        )
+        adc.assert_called_once_with(
+            scopes=("https://www.googleapis.com/auth/cloud-platform",),
+            quota_project_id="octopus",
+        )
+
+
+def test_prediction_service_host_no_port():
+    client = PredictionServiceClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="automl.googleapis.com"
+        ),
+    )
+    assert client._transport._host == "automl.googleapis.com:443"
+
+
+def test_prediction_service_host_with_port():
+    client = PredictionServiceClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="automl.googleapis.com:8000"
+        ),
+    )
+    assert client._transport._host == "automl.googleapis.com:8000"
+
+
+def test_prediction_service_grpc_transport_channel():
+    channel = grpc.insecure_channel("http://localhost/")
+
+    # Check that if channel is provided, mtls endpoint and client_cert_source
+    # won't be used.
+    callback = mock.MagicMock()
+    transport = transports.PredictionServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=callback,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert not callback.called
+
+
+def test_prediction_service_grpc_asyncio_transport_channel():
+    channel = aio.insecure_channel("http://localhost/")
+
+    # Check that if channel is provided, mtls endpoint and client_cert_source
+    # won't be used.
+    callback = mock.MagicMock()
+    transport = transports.PredictionServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=callback,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert not callback.called
+
+
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
+def test_prediction_service_grpc_transport_channel_mtls_with_client_cert_source(
+    grpc_create_channel, grpc_ssl_channel_cred
+):
+    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
+    # are provided, then an mTLS channel will be created.
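+    # ``client_cert_source_callback`` is the module-level helper that returns
+    # the fake ``(b"cert bytes", b"key bytes")`` pair asserted on below.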
+    mock_cred = mock.Mock()
+
+    mock_ssl_cred = mock.Mock()
+    grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    transport = transports.PredictionServiceGrpcTransport(
+        host="squid.clam.whelk",
+        credentials=mock_cred,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=client_cert_source_callback,
+    )
+    grpc_ssl_channel_cred.assert_called_once_with(
+        certificate_chain=b"cert bytes", private_key=b"key bytes"
+    )
+    grpc_create_channel.assert_called_once_with(
+        "mtls.squid.clam.whelk:443",
+        credentials=mock_cred,
+        credentials_file=None,
+        scopes=("https://www.googleapis.com/auth/cloud-platform",),
+        ssl_credentials=mock_ssl_cred,
+        quota_project_id=None,
+    )
+    assert transport.grpc_channel == mock_grpc_channel
+
+
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
+def test_prediction_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
+    grpc_create_channel, grpc_ssl_channel_cred
+):
+    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
+    # are provided, then an mTLS channel will be created.
+    mock_cred = mock.Mock()
+
+    mock_ssl_cred = mock.Mock()
+    grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    transport = transports.PredictionServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        credentials=mock_cred,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=client_cert_source_callback,
+    )
+    grpc_ssl_channel_cred.assert_called_once_with(
+        certificate_chain=b"cert bytes", private_key=b"key bytes"
+    )
+    grpc_create_channel.assert_called_once_with(
+        "mtls.squid.clam.whelk:443",
+        credentials=mock_cred,
+        credentials_file=None,
+        scopes=("https://www.googleapis.com/auth/cloud-platform",),
+        ssl_credentials=mock_ssl_cred,
+        quota_project_id=None,
+    )
+    assert transport.grpc_channel == mock_grpc_channel
+
+
+@pytest.mark.parametrize(
+    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
+)
+@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
+def test_prediction_service_grpc_transport_channel_mtls_with_adc(
+    grpc_create_channel, api_mtls_endpoint
+):
+    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
+    # is provided, then an mTLS channel will be created with SSL ADC.
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    # Mock google.auth.transport.grpc.SslCredentials class.
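+    # "SSL ADC" here means the default mTLS client credentials that
+    # google.auth.transport.grpc.SslCredentials would normally load from the
+    # environment; the patch below substitutes a mock for them.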
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        mock_cred = mock.Mock()
+        transport = transports.PredictionServiceGrpcTransport(
+            host="squid.clam.whelk",
+            credentials=mock_cred,
+            api_mtls_endpoint=api_mtls_endpoint,
+            client_cert_source=None,
+        )
+        grpc_create_channel.assert_called_once_with(
+            "mtls.squid.clam.whelk:443",
+            credentials=mock_cred,
+            credentials_file=None,
+            scopes=("https://www.googleapis.com/auth/cloud-platform",),
+            ssl_credentials=mock_ssl_cred,
+            quota_project_id=None,
+        )
+        assert transport.grpc_channel == mock_grpc_channel
+
+
+@pytest.mark.parametrize(
+    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
+)
+@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
+def test_prediction_service_grpc_asyncio_transport_channel_mtls_with_adc(
+    grpc_create_channel, api_mtls_endpoint
+):
+    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
+    # is provided, then an mTLS channel will be created with SSL ADC.
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    # Mock google.auth.transport.grpc.SslCredentials class.
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        mock_cred = mock.Mock()
+        transport = transports.PredictionServiceGrpcAsyncIOTransport(
+            host="squid.clam.whelk",
+            credentials=mock_cred,
+            api_mtls_endpoint=api_mtls_endpoint,
+            client_cert_source=None,
+        )
+        grpc_create_channel.assert_called_once_with(
+            "mtls.squid.clam.whelk:443",
+            credentials=mock_cred,
+            credentials_file=None,
+            scopes=("https://www.googleapis.com/auth/cloud-platform",),
+            ssl_credentials=mock_ssl_cred,
+            quota_project_id=None,
+        )
+        assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_prediction_service_grpc_lro_client():
+    client = PredictionServiceClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client._transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_prediction_service_grpc_lro_async_client():
+    client = PredictionServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client._client._transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
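+    # The ``is`` comparison below verifies the property caches and returns the
+    # identical object on repeated access, not merely an equal one.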
+    assert transport.operations_client is transport.operations_client
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(
+        transports.PredictionServiceTransport, "_prep_wrapped_messages"
+    ) as prep:
+        client = PredictionServiceClient(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(
+        transports.PredictionServiceTransport, "_prep_wrapped_messages"
+    ) as prep:
+        transport_class = PredictionServiceClient.get_transport_class()
+        transport = transport_class(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/automl_v1beta1/__init__.py b/tests/unit/gapic/automl_v1beta1/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/tests/unit/gapic/automl_v1beta1/__init__.py
@@ -0,0 +1 @@
+
diff --git a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
new file mode 100644
index 00000000..2464c824
--- /dev/null
+++ b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
@@ -0,0 +1,6622 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1beta1.services.auto_ml import AutoMlAsyncClient +from google.cloud.automl_v1beta1.services.auto_ml import AutoMlClient +from google.cloud.automl_v1beta1.services.auto_ml import pagers +from google.cloud.automl_v1beta1.services.auto_ml import transports +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import classification +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import data_stats as gca_data_stats +from google.cloud.automl_v1beta1.types import data_types +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import regression +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoMlClient._get_default_mtls_endpoint(None) is None + assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + ) + assert ( + AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient]) +def test_auto_ml_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "automl.googleapis.com:443" + + +def test_auto_ml_client_get_transport_class(): + transport = AutoMlClient.get_transport_class() + assert transport == transports.AutoMlGrpcTransport + + transport = AutoMlClient.get_transport_class("grpc") + assert transport == transports.AutoMlGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient) +) +@mock.patch.object( + AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient) +) +def test_auto_ml_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AutoMlClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoMlClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". 
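+    # (GOOGLE_API_USE_MTLS also accepts "always" and "auto"; those branches
+    # are exercised further below.)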
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. 
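+    # Any value other than "never", "always", or "auto" should raise
+    # MutualTLSChannelError rather than being silently ignored.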
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_auto_ml_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_auto_ml_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_auto_ml_client_client_options_from_dict(): + with mock.patch( + "google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = AutoMlClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_dataset( + transport: str = "grpc", request_type=service.CreateDatasetRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
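+    # Patching ``type(...).__call__`` intercepts the stub invocation itself,
+    # so no channel is opened and no network traffic occurs.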
+ with mock.patch.object(type(client._transport.create_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ), + ) + + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_create_dataset_from_dict(): + test_create_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.CreateDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + ) + ) + + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_create_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_dataset), "__call__") as call: + call.return_value = gca_dataset.Dataset() + + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
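+    # The URI-bound field (``parent``) must be mirrored into the
+    # ``x-goog-request-params`` metadata so the backend can route the request.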
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + +def test_create_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + service.CreateDatasetRequest(), + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_dataset( + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_dataset( + service.CreateDatasetRequest(), + parent="parent_value", + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + +def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ), + ) + + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_get_dataset_from_dict(): + test_get_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
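+        # FakeUnaryUnaryCall wraps the response in an awaitable, mimicking a
+        # real async gRPC call.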
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + ) + ) + + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_get_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_dataset), "__call__") as call: + call.return_value = dataset.Dataset() + + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_dataset( + service.GetDatasetRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_dataset( + service.GetDatasetRequest(), name="name_value", + ) + + +def test_list_datasets( + transport: str = "grpc", request_type=service.ListDatasetsRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_datasets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_datasets_from_dict(): + test_list_datasets(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListDatasetsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_datasets), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListDatasetsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_datasets_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_datasets), "__call__") as call: + call.return_value = service.ListDatasetsResponse() + + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_datasets), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListDatasetsResponse() + ) + + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_datasets_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_datasets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_datasets_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + service.ListDatasetsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+        type(client._client._transport.list_datasets), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = service.ListDatasetsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListDatasetsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_datasets(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_datasets(
+            service.ListDatasetsRequest(), parent="parent_value",
+        )
+
+
+def test_list_datasets_pager():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_datasets(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, dataset.Dataset) for i in results)
+
+
+def test_list_datasets_pages():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_datasets(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pager():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_datasets),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
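+        # Each element of side_effect is returned by one stub call; the
+        # trailing RuntimeError guards against the pager fetching more pages
+        # than were staged.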
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_datasets(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, dataset.Dataset) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pages():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_datasets),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
+                next_page_token="abc",
+            ),
+            service.ListDatasetsResponse(datasets=[], next_page_token="def",),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(),], next_page_token="ghi",
+            ),
+            service.ListDatasetsResponse(
+                datasets=[dataset.Dataset(), dataset.Dataset(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page in (await client.list_datasets(request={})).pages:
+            pages.append(page)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+def test_update_dataset(
+    transport: str = "grpc", request_type=service.UpdateDatasetRequest
+):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_dataset.Dataset(
+            name="name_value",
+            display_name="display_name_value",
+            description="description_value",
+            example_count=1396,
+            etag="etag_value",
+            translation_dataset_metadata=translation.TranslationDatasetMetadata(
+                source_language_code="source_language_code_value"
+            ),
+        )
+
+        response = client.update_dataset(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == service.UpdateDatasetRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_update_dataset_from_dict(): + test_update_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.UpdateDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_dataset.Dataset( + name="name_value", + display_name="display_name_value", + description="description_value", + example_count=1396, + etag="etag_value", + ) + ) + + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.description == "description_value" + + assert response.example_count == 1396 + + assert response.etag == "etag_value" + + +def test_update_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + request.dataset.name = "dataset.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_dataset), "__call__") as call: + call.return_value = gca_dataset.Dataset() + + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + request.dataset.name = "dataset.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_dataset), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
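+    # For update RPCs the routing header is derived from the nested
+    # ``dataset.name`` field rather than a top-level one.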
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[ + "metadata" + ] + + +def test_update_dataset_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + +def test_update_dataset_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_dataset( + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].dataset == gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ) + + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset( + translation_dataset_metadata=translation.TranslationDatasetMetadata( + source_language_code="source_language_code_value" + ) + ), + ) + + +def test_delete_dataset( + transport: str = "grpc", request_type=service.DeleteDatasetRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_from_dict(): + test_delete_dataset(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.DeleteDatasetRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_dataset), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value.
+    request = service.DeleteDatasetRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.delete_dataset), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+
+        await client.delete_dataset(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_dataset_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_dataset(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_dataset_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_dataset(
+            service.DeleteDatasetRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_dataset_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.delete_dataset), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_dataset(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_dataset_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_dataset(
+            service.DeleteDatasetRequest(), name="name_value",
+        )
+
+
+def test_import_data(transport: str = "grpc", request_type=service.ImportDataRequest):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.import_data), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_from_dict(): + test_import_data(request_type=dict) + + +@pytest.mark.asyncio +async def test_import_data_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ImportDataRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.import_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.import_data), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.import_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_import_data_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.import_data(
+            name="name_value",
+            input_config=io.InputConfig(
+                gcs_source=io.GcsSource(input_uris=["input_uris_value"])
+            ),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].input_config == io.InputConfig(
+            gcs_source=io.GcsSource(input_uris=["input_uris_value"])
+        )
+
+
+def test_import_data_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.import_data(
+            service.ImportDataRequest(),
+            name="name_value",
+            input_config=io.InputConfig(
+                gcs_source=io.GcsSource(input_uris=["input_uris_value"])
+            ),
+        )
+
+
+@pytest.mark.asyncio
+async def test_import_data_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.import_data), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.import_data(
+            name="name_value",
+            input_config=io.InputConfig(
+                gcs_source=io.GcsSource(input_uris=["input_uris_value"])
+            ),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].input_config == io.InputConfig(
+            gcs_source=io.GcsSource(input_uris=["input_uris_value"])
+        )
+
+
+@pytest.mark.asyncio
+async def test_import_data_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.import_data(
+            service.ImportDataRequest(),
+            name="name_value",
+            input_config=io.InputConfig(
+                gcs_source=io.GcsSource(input_uris=["input_uris_value"])
+            ),
+        )
+
+
+def test_export_data(transport: str = "grpc", request_type=service.ExportDataRequest):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
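+    # (Patching ``__call__`` on the type of the transport's method object
+    # intercepts the RPC in-process, so no real gRPC channel is ever opened.)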
+ with mock.patch.object(type(client._transport.export_data), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_from_dict(): + test_export_data(request_type=dict) + + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ExportDataRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_data), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_data), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportDataRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_data), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_export_data_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.export_data(
+            name="name_value",
+            output_config=io.OutputConfig(
+                gcs_destination=io.GcsDestination(
+                    output_uri_prefix="output_uri_prefix_value"
+                )
+            ),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].output_config == io.OutputConfig(
+            gcs_destination=io.GcsDestination(
+                output_uri_prefix="output_uri_prefix_value"
+            )
+        )
+
+
+def test_export_data_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.export_data(
+            service.ExportDataRequest(),
+            name="name_value",
+            output_config=io.OutputConfig(
+                gcs_destination=io.GcsDestination(
+                    output_uri_prefix="output_uri_prefix_value"
+                )
+            ),
+        )
+
+
+@pytest.mark.asyncio
+async def test_export_data_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.export_data), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.export_data(
+            name="name_value",
+            output_config=io.OutputConfig(
+                gcs_destination=io.GcsDestination(
+                    output_uri_prefix="output_uri_prefix_value"
+                )
+            ),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].output_config == io.OutputConfig(
+            gcs_destination=io.GcsDestination(
+                output_uri_prefix="output_uri_prefix_value"
+            )
+        )
+
+
+@pytest.mark.asyncio
+async def test_export_data_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.export_data( + service.ExportDataRequest(), + name="name_value", + output_config=io.OutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + +def test_get_annotation_spec( + transport: str = "grpc", request_type=service.GetAnnotationSpecRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_annotation_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec( + name="name_value", display_name="display_name_value", example_count=1396, + ) + + response = client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.example_count == 1396 + + +def test_get_annotation_spec_from_dict(): + test_get_annotation_spec(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetAnnotationSpecRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_annotation_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + annotation_spec.AnnotationSpec( + name="name_value", + display_name="display_name_value", + example_count=1396, + ) + ) + + response = await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.example_count == 1396 + + +def test_get_annotation_spec_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_annotation_spec), "__call__" + ) as call: + call.return_value = annotation_spec.AnnotationSpec() + + client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.GetAnnotationSpecRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_annotation_spec), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            annotation_spec.AnnotationSpec()
+        )
+
+        await client.get_annotation_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_annotation_spec_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.get_annotation_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = annotation_spec.AnnotationSpec()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_annotation_spec(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_annotation_spec_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_annotation_spec(
+            service.GetAnnotationSpecRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_annotation_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            annotation_spec.AnnotationSpec()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_annotation_spec(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.get_annotation_spec( + service.GetAnnotationSpecRequest(), name="name_value", + ) + + +def test_get_table_spec( + transport: str = "grpc", request_type=service.GetTableSpecRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_table_spec), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table_spec.TableSpec( + name="name_value", + time_column_spec_id="time_column_spec_id_value", + row_count=992, + valid_row_count=1615, + column_count=1302, + etag="etag_value", + ) + + response = client.get_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetTableSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table_spec.TableSpec) + + assert response.name == "name_value" + + assert response.time_column_spec_id == "time_column_spec_id_value" + + assert response.row_count == 992 + + assert response.valid_row_count == 1615 + + assert response.column_count == 1302 + + assert response.etag == "etag_value" + + +def test_get_table_spec_from_dict(): + test_get_table_spec(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_table_spec_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetTableSpecRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_table_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table_spec.TableSpec( + name="name_value", + time_column_spec_id="time_column_spec_id_value", + row_count=992, + valid_row_count=1615, + column_count=1302, + etag="etag_value", + ) + ) + + response = await client.get_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, table_spec.TableSpec) + + assert response.name == "name_value" + + assert response.time_column_spec_id == "time_column_spec_id_value" + + assert response.row_count == 992 + + assert response.valid_row_count == 1615 + + assert response.column_count == 1302 + + assert response.etag == "etag_value" + + +def test_get_table_spec_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTableSpecRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client._transport.get_table_spec), "__call__") as call:
+        call.return_value = table_spec.TableSpec()
+
+        client.get_table_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_table_spec_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.GetTableSpecRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_table_spec), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            table_spec.TableSpec()
+        )
+
+        await client.get_table_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_table_spec_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.get_table_spec), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table_spec.TableSpec()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_table_spec(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_table_spec_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_table_spec(
+            service.GetTableSpecRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_table_spec_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_table_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            table_spec.TableSpec()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_table_spec(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
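+        # (The async variants assert only that at least one call was
+        # recorded, rather than an exact count.)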
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_table_spec_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_table_spec( + service.GetTableSpecRequest(), name="name_value", + ) + + +def test_list_table_specs( + transport: str = "grpc", request_type=service.ListTableSpecsRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_table_specs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTableSpecsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListTableSpecsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTableSpecsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_table_specs_from_dict(): + test_list_table_specs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_table_specs_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListTableSpecsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_table_specs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListTableSpecsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTableSpecsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_table_specs_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTableSpecsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_table_specs), "__call__" + ) as call: + call.return_value = service.ListTableSpecsResponse() + + client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_table_specs_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.ListTableSpecsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_table_specs), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListTableSpecsResponse()
+        )
+
+        await client.list_table_specs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_table_specs_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_table_specs), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = service.ListTableSpecsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_table_specs(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_table_specs_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_table_specs(
+            service.ListTableSpecsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_table_specs_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_table_specs), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListTableSpecsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_table_specs(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_table_specs_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_table_specs(
+            service.ListTableSpecsRequest(), parent="parent_value",
+        )
+
+
+def test_list_table_specs_pager():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_table_specs), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListTableSpecsResponse(
+                table_specs=[
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListTableSpecsResponse(table_specs=[], next_page_token="def",),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(),], next_page_token="ghi",
+            ),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(), table_spec.TableSpec(),],
+            ),
+            RuntimeError,
+        )
+
+        # The pager should carry the routing-header metadata derived from the
+        # (empty) ``parent`` field into each page request.
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_table_specs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, table_spec.TableSpec) for i in results)
+
+
+def test_list_table_specs_pages():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_table_specs), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListTableSpecsResponse(
+                table_specs=[
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListTableSpecsResponse(table_specs=[], next_page_token="def",),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(),], next_page_token="ghi",
+            ),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(), table_spec.TableSpec(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_table_specs(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_table_specs_async_pager():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_table_specs),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
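+        # (``side_effect`` yields one response per page fetch; the trailing
+        # RuntimeError fails fast if the pager requests an extra page.)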
+        call.side_effect = (
+            service.ListTableSpecsResponse(
+                table_specs=[
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListTableSpecsResponse(table_specs=[], next_page_token="def",),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(),], next_page_token="ghi",
+            ),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(), table_spec.TableSpec(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_table_specs(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table_spec.TableSpec) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_table_specs_async_pages():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_table_specs),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListTableSpecsResponse(
+                table_specs=[
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                    table_spec.TableSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListTableSpecsResponse(table_specs=[], next_page_token="def",),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(),], next_page_token="ghi",
+            ),
+            service.ListTableSpecsResponse(
+                table_specs=[table_spec.TableSpec(), table_spec.TableSpec(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page in (await client.list_table_specs(request={})).pages:
+            pages.append(page)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+def test_update_table_spec(
+    transport: str = "grpc", request_type=service.UpdateTableSpecRequest
+):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.update_table_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_table_spec.TableSpec(
+            name="name_value",
+            time_column_spec_id="time_column_spec_id_value",
+            row_count=992,
+            valid_row_count=1615,
+            column_count=1302,
+            etag="etag_value",
+        )
+
+        response = client.update_table_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == service.UpdateTableSpecRequest()
+
+        # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_table_spec.TableSpec) + + assert response.name == "name_value" + + assert response.time_column_spec_id == "time_column_spec_id_value" + + assert response.row_count == 992 + + assert response.valid_row_count == 1615 + + assert response.column_count == 1302 + + assert response.etag == "etag_value" + + +def test_update_table_spec_from_dict(): + test_update_table_spec(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_table_spec_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.UpdateTableSpecRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_table_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_table_spec.TableSpec( + name="name_value", + time_column_spec_id="time_column_spec_id_value", + row_count=992, + valid_row_count=1615, + column_count=1302, + etag="etag_value", + ) + ) + + response = await client.update_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_table_spec.TableSpec) + + assert response.name == "name_value" + + assert response.time_column_spec_id == "time_column_spec_id_value" + + assert response.row_count == 992 + + assert response.valid_row_count == 1615 + + assert response.column_count == 1302 + + assert response.etag == "etag_value" + + +def test_update_table_spec_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateTableSpecRequest() + request.table_spec.name = "table_spec.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_table_spec), "__call__" + ) as call: + call.return_value = gca_table_spec.TableSpec() + + client.update_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_spec.name=table_spec.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_table_spec_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateTableSpecRequest() + request.table_spec.name = "table_spec.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
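+    # (The async client delegates to an inner ``_client`` that owns the
+    # transport, hence the ``_client._transport`` attribute path.)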
+    with mock.patch.object(
+        type(client._client._transport.update_table_spec), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            gca_table_spec.TableSpec()
+        )
+
+        await client.update_table_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "table_spec.name=table_spec.name/value",) in kw[
+        "metadata"
+    ]
+
+
+def test_update_table_spec_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.update_table_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_table_spec.TableSpec()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_table_spec(
+            table_spec=gca_table_spec.TableSpec(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].table_spec == gca_table_spec.TableSpec(name="name_value")
+
+
+def test_update_table_spec_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_table_spec(
+            service.UpdateTableSpecRequest(),
+            table_spec=gca_table_spec.TableSpec(name="name_value"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_table_spec_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.update_table_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            gca_table_spec.TableSpec()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_table_spec(
+            table_spec=gca_table_spec.TableSpec(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].table_spec == gca_table_spec.TableSpec(name="name_value")
+
+
+@pytest.mark.asyncio
+async def test_update_table_spec_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.update_table_spec( + service.UpdateTableSpecRequest(), + table_spec=gca_table_spec.TableSpec(name="name_value"), + ) + + +def test_get_column_spec( + transport: str = "grpc", request_type=service.GetColumnSpecRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_column_spec), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = column_spec.ColumnSpec( + name="name_value", display_name="display_name_value", etag="etag_value", + ) + + response = client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetColumnSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, column_spec.ColumnSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + +def test_get_column_spec_from_dict(): + test_get_column_spec(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_column_spec_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetColumnSpecRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_column_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + column_spec.ColumnSpec( + name="name_value", display_name="display_name_value", etag="etag_value", + ) + ) + + response = await client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, column_spec.ColumnSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + +def test_get_column_spec_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetColumnSpecRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_column_spec), "__call__") as call: + call.return_value = column_spec.ColumnSpec() + + client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_column_spec_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.GetColumnSpecRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_column_spec), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            column_spec.ColumnSpec()
+        )
+
+        await client.get_column_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_column_spec_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.get_column_spec), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = column_spec.ColumnSpec()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_column_spec(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_column_spec_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_column_spec(
+            service.GetColumnSpecRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_column_spec_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_column_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            column_spec.ColumnSpec()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_column_spec(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_column_spec_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.get_column_spec( + service.GetColumnSpecRequest(), name="name_value", + ) + + +def test_list_column_specs( + transport: str = "grpc", request_type=service.ListColumnSpecsRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_column_specs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListColumnSpecsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListColumnSpecsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListColumnSpecsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_column_specs_from_dict(): + test_list_column_specs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_column_specs_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListColumnSpecsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_column_specs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListColumnSpecsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListColumnSpecsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_column_specs_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListColumnSpecsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_column_specs), "__call__" + ) as call: + call.return_value = service.ListColumnSpecsResponse() + + client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_column_specs_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.ListColumnSpecsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_column_specs), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListColumnSpecsResponse()
+        )
+
+        await client.list_column_specs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_column_specs_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_column_specs), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = service.ListColumnSpecsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_column_specs(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_column_specs_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_column_specs(
+            service.ListColumnSpecsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_column_specs_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_column_specs), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListColumnSpecsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_column_specs(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_column_specs_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_column_specs(
+            service.ListColumnSpecsRequest(), parent="parent_value",
+        )
+
+
+def test_list_column_specs_pager():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_column_specs), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListColumnSpecsResponse(
+                column_specs=[
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListColumnSpecsResponse(column_specs=[], next_page_token="def",),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(),], next_page_token="ghi",
+            ),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(), column_spec.ColumnSpec(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_column_specs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, column_spec.ColumnSpec) for i in results)
+
+
+def test_list_column_specs_pages():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_column_specs), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListColumnSpecsResponse(
+                column_specs=[
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListColumnSpecsResponse(column_specs=[], next_page_token="def",),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(),], next_page_token="ghi",
+            ),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(), column_spec.ColumnSpec(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_column_specs(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_column_specs_async_pager():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_column_specs),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
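+        # (mock's side_effect hands out one staged response per stub call;
+        #  the trailing RuntimeError is a sentinel that fails loudly if the
+        #  pager ever requests more pages than were staged.)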
+        call.side_effect = (
+            service.ListColumnSpecsResponse(
+                column_specs=[
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListColumnSpecsResponse(column_specs=[], next_page_token="def",),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(),], next_page_token="ghi",
+            ),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(), column_spec.ColumnSpec(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_column_specs(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, column_spec.ColumnSpec) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_column_specs_async_pages():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_column_specs),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListColumnSpecsResponse(
+                column_specs=[
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                    column_spec.ColumnSpec(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListColumnSpecsResponse(column_specs=[], next_page_token="def",),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(),], next_page_token="ghi",
+            ),
+            service.ListColumnSpecsResponse(
+                column_specs=[column_spec.ColumnSpec(), column_spec.ColumnSpec(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page in (await client.list_column_specs(request={})).pages:
+            pages.append(page)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+def test_update_column_spec(
+    transport: str = "grpc", request_type=service.UpdateColumnSpecRequest
+):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.update_column_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_column_spec.ColumnSpec(
+            name="name_value", display_name="display_name_value", etag="etag_value",
+        )
+
+        response = client.update_column_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == service.UpdateColumnSpecRequest()
+
+    # Establish that the response is the type that we expect.
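+    # (gca_column_spec is the generated alias for the column_spec message
+    #  module; the "gca" prefix abbreviates google.cloud.automl and keeps
+    #  this import distinct from the plain column_spec module used elsewhere.)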
+ assert isinstance(response, gca_column_spec.ColumnSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + +def test_update_column_spec_from_dict(): + test_update_column_spec(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_column_spec_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.UpdateColumnSpecRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_column_spec), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_column_spec.ColumnSpec( + name="name_value", display_name="display_name_value", etag="etag_value", + ) + ) + + response = await client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_column_spec.ColumnSpec) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.etag == "etag_value" + + +def test_update_column_spec_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateColumnSpecRequest() + request.column_spec.name = "column_spec.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_column_spec), "__call__" + ) as call: + call.return_value = gca_column_spec.ColumnSpec() + + client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "column_spec.name=column_spec.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_column_spec_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateColumnSpecRequest() + request.column_spec.name = "column_spec.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_column_spec), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_column_spec.ColumnSpec() + ) + + await client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "column_spec.name=column_spec.name/value",) in kw[
+        "metadata"
+    ]
+
+
+def test_update_column_spec_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.update_column_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_column_spec.ColumnSpec()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_column_spec(
+            column_spec=gca_column_spec.ColumnSpec(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].column_spec == gca_column_spec.ColumnSpec(name="name_value")
+
+
+def test_update_column_spec_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_column_spec(
+            service.UpdateColumnSpecRequest(),
+            column_spec=gca_column_spec.ColumnSpec(name="name_value"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_column_spec_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.update_column_spec), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            gca_column_spec.ColumnSpec()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_column_spec(
+            column_spec=gca_column_spec.ColumnSpec(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].column_spec == gca_column_spec.ColumnSpec(name="name_value")
+
+
+@pytest.mark.asyncio
+async def test_update_column_spec_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_column_spec(
+            service.UpdateColumnSpecRequest(),
+            column_spec=gca_column_spec.ColumnSpec(name="name_value"),
+        )
+
+
+def test_create_model(transport: str = "grpc", request_type=service.CreateModelRequest):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+        # Designate an appropriate return value for the call.
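+        # (create_model is a long-running operation: the stub hands back a
+        #  raw operations_pb2.Operation, which the client wraps in an
+        #  api_core future; the test asserts on that wrapper below.)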
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_from_dict(): + test_create_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.CreateModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_create_model_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_model(
+            parent="parent_value",
+            model=gca_model.Model(
+                translation_model_metadata=translation.TranslationModelMetadata(
+                    base_model="base_model_value"
+                )
+            ),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].model == gca_model.Model(
+            translation_model_metadata=translation.TranslationModelMetadata(
+                base_model="base_model_value"
+            )
+        )
+
+
+def test_create_model_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_model(
+            service.CreateModelRequest(),
+            parent="parent_value",
+            model=gca_model.Model(
+                translation_model_metadata=translation.TranslationModelMetadata(
+                    base_model="base_model_value"
+                )
+            ),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_model_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.create_model), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_model(
+            parent="parent_value",
+            model=gca_model.Model(
+                translation_model_metadata=translation.TranslationModelMetadata(
+                    base_model="base_model_value"
+                )
+            ),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].model == gca_model.Model(
+            translation_model_metadata=translation.TranslationModelMetadata(
+                base_model="base_model_value"
+            )
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_model_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.create_model( + service.CreateModelRequest(), + parent="parent_value", + model=gca_model.Model( + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ) + ), + ) + + +def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name="name_value", + display_name="display_name_value", + dataset_id="dataset_id_value", + deployment_state=model.Model.DeploymentState.DEPLOYED, + translation_model_metadata=translation.TranslationModelMetadata( + base_model="base_model_value" + ), + ) + + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.dataset_id == "dataset_id_value" + + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + +def test_get_model_from_dict(): + test_get_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model.Model( + name="name_value", + display_name="display_name_value", + dataset_id="dataset_id_value", + deployment_state=model.Model.DeploymentState.DEPLOYED, + ) + ) + + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.dataset_id == "dataset_id_value" + + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + +def test_get_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
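+    # (Patching __call__ on type(client._transport.get_model) intercepts the
+    #  gRPC multicallable itself, so the outgoing request and metadata can be
+    #  inspected from call.mock_calls without any network traffic.)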
+    with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+        call.return_value = model.Model()
+
+        client.get_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_model_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.GetModelRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_model), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
+
+        await client.get_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_model_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model.Model()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_model(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_model_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_model(
+            service.GetModelRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_model_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.get_model), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_model_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.get_model( + service.GetModelRequest(), name="name_value", + ) + + +def test_list_models(transport: str = "grpc", request_type=service.ListModelsRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_models), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_models_from_dict(): + test_list_models(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_models_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListModelsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_models_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_models), "__call__") as call: + call.return_value = service.ListModelsResponse() + + client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_models_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = service.ListModelsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_models), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListModelsResponse()
+        )
+
+        await client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_models_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = service.ListModelsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_models(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_models_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_models(
+            service.ListModelsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_models_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_models), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            service.ListModelsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_models(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_models_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_models(
+            service.ListModelsRequest(), parent="parent_value",
+        )
+
+
+def test_list_models_pager():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelsResponse(
+                model=[model.Model(), model.Model(), model.Model(),],
+                next_page_token="abc",
+            ),
+            service.ListModelsResponse(model=[], next_page_token="def",),
+            service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",),
+            service.ListModelsResponse(model=[model.Model(), model.Model(),],),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_models(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model.Model) for i in results)
+
+
+def test_list_models_pages():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelsResponse(
+                model=[model.Model(), model.Model(), model.Model(),],
+                next_page_token="abc",
+            ),
+            service.ListModelsResponse(model=[], next_page_token="def",),
+            service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",),
+            service.ListModelsResponse(model=[model.Model(), model.Model(),],),
+            RuntimeError,
+        )
+        pages = list(client.list_models(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_models_async_pager():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_models),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelsResponse(
+                model=[model.Model(), model.Model(), model.Model(),],
+                next_page_token="abc",
+            ),
+            service.ListModelsResponse(model=[], next_page_token="def",),
+            service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",),
+            service.ListModelsResponse(model=[model.Model(), model.Model(),],),
+            RuntimeError,
+        )
+        async_pager = await client.list_models(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model.Model) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_models_async_pages():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_models),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + service.ListModelsResponse( + model=[model.Model(), model.Model(), model.Model(),], + next_page_token="abc", + ), + service.ListModelsResponse(model=[], next_page_token="def",), + service.ListModelsResponse(model=[model.Model(),], next_page_token="ghi",), + service.ListModelsResponse(model=[model.Model(), model.Model(),],), + RuntimeError, + ) + pages = [] + async for page in (await client.list_models(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_from_dict(): + test_delete_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.DeleteModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_model_field_headers_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = service.DeleteModelRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.delete_model), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+
+        await client.delete_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_model_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_model(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_model_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_model(
+            service.DeleteModelRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_model_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.delete_model), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_model(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_model_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_model( + service.DeleteModelRequest(), name="name_value", + ) + + +def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.deploy_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_from_dict(): + test_deploy_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.DeployModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.deploy_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.deploy_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
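+    # (The async client wraps a synchronous client internally, so its
+    #  transport is reached via client._client._transport, and the awaitable
+    #  response is faked with grpc_helpers_async.FakeUnaryUnaryCall.)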
+    with mock.patch.object(
+        type(client._client._transport.deploy_model), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+
+        await client.deploy_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_deploy_model_flattened():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.deploy_model(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_deploy_model_flattened_error():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.deploy_model(
+            service.DeployModelRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_deploy_model_flattened_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.deploy_model), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.deploy_model(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_deploy_model_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.deploy_model(
+            service.DeployModelRequest(), name="name_value",
+        )
+
+
+def test_undeploy_model(
+    transport: str = "grpc", request_type=service.UndeployModelRequest
+):
+    client = AutoMlClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_from_dict(): + test_undeploy_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.UndeployModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.undeploy_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UndeployModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UndeployModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.undeploy_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
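+    # The request's ``name`` field is mirrored into the x-goog-request-params
+    # metadata entry so the backend can route the call.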
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_undeploy_model_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_undeploy_model_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_model( + service.UndeployModelRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.undeploy_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undeploy_model(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_model( + service.UndeployModelRequest(), name="name_value", + ) + + +def test_export_model(transport: str = "grpc", request_type=service.ExportModelRequest): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_export_model_from_dict(): + test_export_model(request_type=dict) + + +@pytest.mark.asyncio +async def test_export_model_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ExportModelRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_model), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_model_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_model), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_export_model_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.export_model), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
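+        # The flattened ``name`` and ``output_config`` kwargs are assembled
+        # into a single ExportModelRequest before the transport is invoked.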
+ client.export_model( + name="name_value", + output_config=io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + +def test_export_model_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_model( + service.ExportModelRequest(), + name="name_value", + output_config=io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_export_model_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_model), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_model( + name="name_value", + output_config=io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + +@pytest.mark.asyncio +async def test_export_model_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_model( + service.ExportModelRequest(), + name="name_value", + output_config=io.ModelExportOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + ) + + +def test_export_evaluated_examples( + transport: str = "grpc", request_type=service.ExportEvaluatedExamplesRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.export_evaluated_examples), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
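+        # "operations/spam" is an arbitrary operation name; the test only
+        # checks that the response is wrapped as a future.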
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportEvaluatedExamplesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_evaluated_examples_from_dict(): + test_export_evaluated_examples(request_type=dict) + + +@pytest.mark.asyncio +async def test_export_evaluated_examples_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ExportEvaluatedExamplesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_evaluated_examples), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_evaluated_examples_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportEvaluatedExamplesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.export_evaluated_examples), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_export_evaluated_examples_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportEvaluatedExamplesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_evaluated_examples), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_export_evaluated_examples_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.export_evaluated_examples), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_evaluated_examples( + name="name_value", + output_config=io.ExportEvaluatedExamplesOutputConfig( + bigquery_destination=io.BigQueryDestination( + output_uri="output_uri_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.ExportEvaluatedExamplesOutputConfig( + bigquery_destination=io.BigQueryDestination(output_uri="output_uri_value") + ) + + +def test_export_evaluated_examples_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_evaluated_examples( + service.ExportEvaluatedExamplesRequest(), + name="name_value", + output_config=io.ExportEvaluatedExamplesOutputConfig( + bigquery_destination=io.BigQueryDestination( + output_uri="output_uri_value" + ) + ), + ) + + +@pytest.mark.asyncio +async def test_export_evaluated_examples_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.export_evaluated_examples), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_evaluated_examples( + name="name_value", + output_config=io.ExportEvaluatedExamplesOutputConfig( + bigquery_destination=io.BigQueryDestination( + output_uri="output_uri_value" + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].output_config == io.ExportEvaluatedExamplesOutputConfig( + bigquery_destination=io.BigQueryDestination(output_uri="output_uri_value") + ) + + +@pytest.mark.asyncio +async def test_export_evaluated_examples_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
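+    # No transport mock is needed here: the ValueError is raised client-side,
+    # before any RPC is attempted.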
+ with pytest.raises(ValueError): + await client.export_evaluated_examples( + service.ExportEvaluatedExamplesRequest(), + name="name_value", + output_config=io.ExportEvaluatedExamplesOutputConfig( + bigquery_destination=io.BigQueryDestination( + output_uri="output_uri_value" + ) + ), + ) + + +def test_get_model_evaluation( + transport: str = "grpc", request_type=service.GetModelEvaluationRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation( + name="name_value", + annotation_spec_id="annotation_spec_id_value", + display_name="display_name_value", + evaluated_example_count=2446, + classification_evaluation_metrics=classification.ClassificationEvaluationMetrics( + au_prc=0.634 + ), + ) + + response = client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + + assert response.name == "name_value" + + assert response.annotation_spec_id == "annotation_spec_id_value" + + assert response.display_name == "display_name_value" + + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_from_dict(): + test_get_model_evaluation(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.GetModelEvaluationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_evaluation.ModelEvaluation( + name="name_value", + annotation_spec_id="annotation_spec_id_value", + display_name="display_name_value", + evaluated_example_count=2446, + ) + ) + + response = await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + + assert response.name == "name_value" + + assert response.annotation_spec_id == "annotation_spec_id_value" + + assert response.display_name == "display_name_value" + + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_model_evaluation), "__call__" + ) as call: + call.return_value = model_evaluation.ModelEvaluation() + + client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model_evaluation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_evaluation.ModelEvaluation() + ) + + await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_model_evaluation_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_model_evaluation_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation( + service.GetModelEvaluationRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_evaluation.ModelEvaluation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_model_evaluation(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_error_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model_evaluation( + service.GetModelEvaluationRequest(), name="name_value", + ) + + +def test_list_model_evaluations( + transport: str = "grpc", request_type=service.ListModelEvaluationsRequest +): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_evaluations_from_dict(): + test_list_model_evaluations(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = service.ListModelEvaluationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelEvaluationsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_evaluations_field_headers(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
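+    # List methods route on ``parent`` rather than ``name``; that is the
+    # field asserted in the request-params header below.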
+ with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + call.return_value = service.ListModelEvaluationsResponse() + + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelEvaluationsResponse() + ) + + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_model_evaluations_flattened(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluations(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_model_evaluations_flattened_error(): + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluations( + service.ListModelEvaluationsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_async(): + client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_model_evaluations), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + service.ListModelEvaluationsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_evaluations(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
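+        # Note: the async variants only assert a truthy call count, unlike
+        # the sync tests' exact ``== 1`` check.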
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_error_async():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluations(
+            service.ListModelEvaluationsRequest(), parent="parent_value",
+        )
+
+
+def test_list_model_evaluations_pager():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_model_evaluations), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[], next_page_token="def",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[model_evaluation.ModelEvaluation(),],
+                next_page_token="ghi",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_model_evaluations(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in results)
+
+
+def test_list_model_evaluations_pages():
+    client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_model_evaluations), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[], next_page_token="def",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[model_evaluation.ModelEvaluation(),],
+                next_page_token="ghi",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_model_evaluations(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_pager():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_model_evaluations),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
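+        # Each element of side_effect is consumed by one successive RPC; the
+        # trailing RuntimeError fails the test if the pager tries to fetch
+        # past the final (token-less) page.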
+        call.side_effect = (
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[], next_page_token="def",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[model_evaluation.ModelEvaluation(),],
+                next_page_token="ghi",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_model_evaluations(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_pages():
+    client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_model_evaluations),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token="abc",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[], next_page_token="def",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[model_evaluation.ModelEvaluation(),],
+                next_page_token="ghi",
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page in (await client.list_model_evaluations(request={})).pages:
+            pages.append(page)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.AutoMlGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = AutoMlClient(
+            credentials=credentials.AnonymousCredentials(), transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.AutoMlGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = AutoMlClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.AutoMlGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = AutoMlClient(
+            client_options={"scopes": ["1", "2"]}, transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.AutoMlGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    client = AutoMlClient(transport=transport)
+    assert client._transport is transport
+
+
+def test_transport_get_channel():
+    # A client may be instantiated with a custom transport instance.
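+    # Both the sync and asyncio transports expose their underlying channel
+    # through the ``grpc_channel`` property.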
+ transport = transports.AutoMlGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoMlGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoMlClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.AutoMlGrpcTransport,) + + +def test_auto_ml_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.AutoMlTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_auto_ml_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.AutoMlTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_dataset", + "get_dataset", + "list_datasets", + "update_dataset", + "delete_dataset", + "import_data", + "export_data", + "get_annotation_spec", + "get_table_spec", + "list_table_specs", + "update_table_spec", + "get_column_spec", + "list_column_specs", + "update_column_spec", + "create_model", + "get_model", + "list_models", + "delete_model", + "deploy_model", + "undeploy_model", + "export_model", + "export_evaluated_examples", + "get_model_evaluation", + "list_model_evaluations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_auto_ml_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.AutoMlTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_auto_ml_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + AutoMlClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_auto_ml_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
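+    # Application Default Credentials are resolved via ``google.auth.default``,
+    # which is patched here so no real environment lookup takes place.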
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.AutoMlGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_auto_ml_host_no_port(): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="automl.googleapis.com" + ), + ) + assert client._transport._host == "automl.googleapis.com:443" + + +def test_auto_ml_host_with_port(): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="automl.googleapis.com:8000" + ), + ) + assert client._transport._host == "automl.googleapis.com:8000" + + +def test_auto_ml_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_auto_ml_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_auto_ml_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
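+    # The module-level client_cert_source_callback helper supplies the
+    # b"cert bytes"/b"key bytes" pair asserted against below.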
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_auto_ml_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_auto_ml_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
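+    # With client_cert_source=None the transport falls back to the default
+    # SSL client credentials exposed by SslCredentials (mocked below).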
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_auto_ml_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_auto_ml_grpc_lro_client(): + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_auto_ml_grpc_lro_async_client(): + client = AutoMlAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_dataset_path(): + project = "squid" + location = "clam" + dataset = "whelk" + + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, + ) + actual = AutoMlClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", + } + path = AutoMlClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
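+    # The generated parse_* helpers match the path template with a regular
+    # expression and return the captured segments as a dict.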
+ actual = AutoMlClient.parse_dataset_path(path) + assert expected == actual + + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + actual = AutoMlClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = AutoMlClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_model_path(path) + assert expected == actual + + +def test_column_spec_path(): + project = "squid" + location = "clam" + dataset = "whelk" + table_spec = "octopus" + column_spec = "oyster" + + expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format( + project=project, + location=location, + dataset=dataset, + table_spec=table_spec, + column_spec=column_spec, + ) + actual = AutoMlClient.column_spec_path( + project, location, dataset, table_spec, column_spec + ) + assert expected == actual + + +def test_parse_column_spec_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + "table_spec": "winkle", + "column_spec": "nautilus", + } + path = AutoMlClient.column_spec_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_column_spec_path(path) + assert expected == actual + + +def test_table_spec_path(): + project = "squid" + location = "clam" + dataset = "whelk" + table_spec = "octopus" + + expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format( + project=project, location=location, dataset=dataset, table_spec=table_spec, + ) + actual = AutoMlClient.table_spec_path(project, location, dataset, table_spec) + assert expected == actual + + +def test_parse_table_spec_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "dataset": "cuttlefish", + "table_spec": "mussel", + } + path = AutoMlClient.table_spec_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_table_spec_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.AutoMlTransport, "_prep_wrapped_messages" + ) as prep: + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.AutoMlTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = AutoMlClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py new file mode 100644 index 00000000..c21f17b9 --- /dev/null +++ b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py @@ -0,0 +1,1237 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1beta1.services.prediction_service import ( + PredictionServiceAsyncClient, +) +from google.cloud.automl_v1beta1.services.prediction_service import ( + PredictionServiceClient, +) +from google.cloud.automl_v1beta1.services.prediction_service import transports +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import geometry +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import prediction_service +from google.cloud.automl_v1beta1.types import text_segment +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import struct_pb2 as struct # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert ( + PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [PredictionServiceClient, PredictionServiceAsyncClient] +) +def test_prediction_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "automl.googleapis.com:443" + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + assert transport == transports.PredictionServiceGrpcTransport + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + ( + PredictionServiceAsyncClient, + transports.PredictionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + PredictionServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PredictionServiceClient), +) +@mock.patch.object( + PredictionServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PredictionServiceAsyncClient), +) +def test_prediction_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
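+    # Patching transport_class.__init__ lets the test capture exactly which
+    # keyword arguments the client forwards to its transport.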
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
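+    # With no certificate source available anywhere, the client should fall
+    # back to the regular (non-mTLS) endpoint.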
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + ( + PredictionServiceAsyncClient, + transports.PredictionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_prediction_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + ( + PredictionServiceAsyncClient, + transports.PredictionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_prediction_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
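+    # The filename is forwarded as-is; loading the credentials from disk is
+    # the transport's responsibility.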
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_predict( + transport: str = "grpc", request_type=prediction_service.PredictRequest +): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_from_dict(): + test_predict(request_type=dict) + + +@pytest.mark.asyncio +async def test_predict_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = prediction_service.PredictRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + prediction_service.PredictResponse() + ) + + response = await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_field_headers(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
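+    # For PredictRequest the routed field is `name`; the assertions below
+    # verify it is propagated via the x-goog-request-params metadata header.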
+ request = prediction_service.PredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.predict), "__call__") as call: + call.return_value = prediction_service.PredictResponse() + + client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.predict), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + prediction_service.PredictResponse() + ) + + await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_predict_flattened(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.predict( + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].payload == data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ) + + assert args[0].params == {"key_value": "value_value"} + + +def test_predict_flattened_error(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + +@pytest.mark.asyncio +async def test_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.predict), "__call__") as call: + # Designate an appropriate return value for the call. 
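+        # The async client awaits the transport call, so the response has to be
+        # wrapped in an awaitable (FakeUnaryUnaryCall) rather than returned bare.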
+ call.return_value = prediction_service.PredictResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + prediction_service.PredictResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.predict( + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].payload == data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ) + + assert args[0].params == {"key_value": "value_value"} + + +@pytest.mark.asyncio +async def test_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + name="name_value", + payload=data_items.ExamplePayload( + image=data_items.Image(image_bytes=b"image_bytes_blob") + ), + params={"key_value": "value_value"}, + ) + + +def test_batch_predict( + transport: str = "grpc", request_type=prediction_service.BatchPredictRequest +): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.batch_predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == prediction_service.BatchPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_predict_from_dict(): + test_batch_predict(request_type=dict) + + +@pytest.mark.asyncio +async def test_batch_predict_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = prediction_service.BatchPredictRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.batch_predict), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
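+    # batch_predict is a long-running operation, so the client surfaces a
+    # future rather than a plain response message.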
+ assert isinstance(response, future.Future) + + +def test_batch_predict_field_headers(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.batch_predict), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.batch_predict), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_batch_predict_flattened(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.batch_predict), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_predict( + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
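+        # The flattened keyword arguments should have been folded into a
+        # single BatchPredictRequest by the client.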
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].input_config == io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ) + + assert args[0].output_config == io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + assert args[0].params == {"key_value": "value_value"} + + +def test_batch_predict_flattened_error(): + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_predict( + prediction_service.BatchPredictRequest(), + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + +@pytest.mark.asyncio +async def test_batch_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.batch_predict), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_predict( + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].input_config == io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ) + + assert args[0].output_config == io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ) + + assert args[0].params == {"key_value": "value_value"} + + +@pytest.mark.asyncio +async def test_batch_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_predict( + prediction_service.BatchPredictRequest(), + name="name_value", + input_config=io.BatchPredictInputConfig( + gcs_source=io.GcsSource(input_uris=["input_uris_value"]) + ), + output_config=io.BatchPredictOutputConfig( + gcs_destination=io.GcsDestination( + output_uri_prefix="output_uri_prefix_value" + ) + ), + params={"key_value": "value_value"}, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
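+    # A transport instance already carries its own credentials, so supplying
+    # a second set to the client is rejected with a ValueError.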
+ transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.PredictionServiceGrpcTransport,) + + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
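+    # The base class only defines the interface; the concrete gRPC and
+    # gRPC-asyncio transports are expected to override each of these.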
+ methods = ( + "predict", + "batch_predict", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_prediction_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_prediction_service_host_no_port(): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="automl.googleapis.com" + ), + ) + assert client._transport._host == "automl.googleapis.com:443" + + +def test_prediction_service_host_with_port(): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="automl.googleapis.com:8000" + ), + ) + assert client._transport._host == "automl.googleapis.com:8000" + + +def test_prediction_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_prediction_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
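+    # The MagicMock callback makes it possible to assert afterwards that the
+    # client_cert_source was never invoked.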
+ callback = mock.MagicMock() + transport = transports.PredictionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_prediction_service_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_prediction_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_prediction_service_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
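+    # This stands in for application-default client certificates, so the mTLS
+    # path can be exercised without real key material.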
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_prediction_service_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.PredictionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_prediction_service_grpc_lro_client(): + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_prediction_service_grpc_lro_async_client(): + client = PredictionServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. 
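+    # (the operations client is created lazily and cached on first access)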
+ assert transport.operations_client is transport.operations_client + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.PredictionServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.PredictionServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/v1/test_auto_ml_client_v1.py b/tests/unit/gapic/v1/test_auto_ml_client_v1.py deleted file mode 100644 index 1b8ae7d5..00000000 --- a/tests/unit/gapic/v1/test_auto_ml_client_v1.py +++ /dev/null @@ -1,991 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import automl_v1 -from google.cloud.automl_v1.proto import annotation_spec_pb2 -from google.cloud.automl_v1.proto import dataset_pb2 -from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import model_evaluation_pb2 -from google.cloud.automl_v1.proto import model_pb2 -from google.cloud.automl_v1.proto import service_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestAutoMlClient(object): - def test_create_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - 
operation = operations_pb2.Operation( - name="operations/test_create_dataset", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - response = client.create_dataset(parent, dataset) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.CreateDatasetRequest( - parent=parent, dataset=dataset - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_dataset_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_dataset_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - response = client.create_dataset(parent, dataset) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_dataset(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name_2, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.get_dataset(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - with pytest.raises(CustomException): - client.get_dataset(name) - - def test_list_datasets(self): - # Setup Expected Response - next_page_token = "" - datasets_element = {} - datasets = [datasets_element] - expected_response = {"next_page_token": next_page_token, "datasets": datasets} - expected_response = service_pb2.ListDatasetsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = automl_v1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.datasets[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListDatasetsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_datasets_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - dataset = {} - update_mask = {} - - response = client.update_dataset(dataset, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - dataset = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_dataset(dataset, update_mask) - - def test_delete_dataset(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_dataset", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.delete_dataset(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_dataset_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - 
name="operations/test_delete_dataset_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.delete_dataset(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_import_data(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_import_data", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - input_config = {} - - response = client.import_data(name, input_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ImportDataRequest( - name=name, input_config=input_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_import_data_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_import_data_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - input_config = {} - - response = client.import_data(name, input_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_export_data(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_data", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - output_config = {} - - response = client.export_data(name, output_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ExportDataRequest( - name=name, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_data_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_data_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - output_config = {} - - response = client.export_data(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_annotation_spec(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - example_count = 1517063674 - expected_response = { - "name": name_2, - "display_name": display_name, - "example_count": example_count, - } - expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.annotation_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]" - ) - - response = client.get_annotation_spec(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetAnnotationSpecRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_annotation_spec_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - name = client.annotation_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]" - ) - - with pytest.raises(CustomException): - client.get_annotation_spec(name) - - def test_create_model(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "dataset_id": dataset_id, - "etag": etag, - } - expected_response = model_pb2.Model(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} - - response = client.create_model(parent, model) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.CreateModelRequest(parent=parent, model=model) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
automl_v1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} - - response = client.create_model(parent, model) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_model(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - etag = "etag3123477" - expected_response = { - "name": name_2, - "display_name": display_name, - "dataset_id": dataset_id, - "etag": etag, - } - expected_response = model_pb2.Model(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.get_model(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_model_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - with pytest.raises(CustomException): - client.get_model(name) - - def test_list_models(self): - # Setup Expected Response - next_page_token = "" - model_element = {} - model = [model_element] - expected_response = {"next_page_token": next_page_token, "model": model} - expected_response = service_pb2.ListModelsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_models(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.model[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_models_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_models(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with 
patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.delete_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.delete_model(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_model(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "dataset_id": dataset_id, - "etag": etag, - } - expected_response = model_pb2.Model(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - model = {} - update_mask = {} - - response = client.update_model(model, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateModelRequest( - model=model, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_model_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - model = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_model(model, update_mask) - - def test_deploy_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_deploy_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.deploy_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeployModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_deploy_model_exception(self): - # Setup 
Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_deploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.deploy_model(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_undeploy_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_undeploy_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.undeploy_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.UndeployModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_undeploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_undeploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.undeploy_model(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_export_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_model(name, output_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ExportModelRequest( - name=name, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_model(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_model_evaluation(self): - # Setup Expected Response - name_2 = "name2-1052831874" - annotation_spec_id = "annotationSpecId60690191" - display_name = "displayName1615086568" - evaluated_example_count = 277565350 - expected_response = { - "name": name_2, - "annotation_spec_id": annotation_spec_id, - "display_name": display_name, - "evaluated_example_count": evaluated_example_count, - } - expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) - - response = client.get_model_evaluation(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelEvaluationRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_model_evaluation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) - - with pytest.raises(CustomException): - client.get_model_evaluation(name) - - def test_list_model_evaluations(self): - # Setup Expected Response - next_page_token = "" - model_evaluation_element = {} - model_evaluation = [model_evaluation_element] - expected_response = { - "next_page_token": next_page_token, - "model_evaluation": model_evaluation, - } - expected_response = service_pb2.ListModelEvaluationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - filter_ = "filter-1274492040" - - paged_list_response = client.list_model_evaluations(parent, filter_) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.model_evaluation[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_ - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_model_evaluations_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - parent = 
client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - filter_ = "filter-1274492040" - - paged_list_response = client.list_model_evaluations(parent, filter_) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/gapic/v1/test_prediction_service_client_v1.py b/tests/unit/gapic/v1/test_prediction_service_client_v1.py deleted file mode 100644 index cd75ea01..00000000 --- a/tests/unit/gapic/v1/test_prediction_service_client_v1.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import automl_v1 -from google.cloud.automl_v1.proto import data_items_pb2 -from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import prediction_service_pb2 -from google.longrunning import operations_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestPredictionServiceClient(object): - def test_predict(self): - # Setup Expected Response - expected_response = {} - expected_response = prediction_service_pb2.PredictResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.PredictionServiceClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - payload = {} - - response = client.predict(name, payload) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = prediction_service_pb2.PredictRequest( - name=name, payload=payload - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_predict_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.PredictionServiceClient() - - # Setup request - name = 
client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - payload = {} - - with pytest.raises(CustomException): - client.predict(name, payload) - - def test_batch_predict(self): - # Setup Expected Response - expected_response = {} - expected_response = prediction_service_pb2.BatchPredictResult( - **expected_response - ) - operation = operations_pb2.Operation( - name="operations/test_batch_predict", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.PredictionServiceClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - input_config = {} - output_config = {} - - response = client.batch_predict(name, input_config, output_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = prediction_service_pb2.BatchPredictRequest( - name=name, input_config=input_config, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_batch_predict_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_batch_predict_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.PredictionServiceClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - input_config = {} - output_config = {} - - response = client.batch_predict(name, input_config, output_config) - exception = response.exception() - assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py deleted file mode 100644 index 87d1fe03..00000000 --- a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py +++ /dev/null @@ -1,1255 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import automl_v1beta1 -from google.cloud.automl_v1beta1.proto import annotation_spec_pb2 -from google.cloud.automl_v1beta1.proto import column_spec_pb2 -from google.cloud.automl_v1beta1.proto import dataset_pb2 -from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 -from google.cloud.automl_v1beta1.proto import model_pb2 -from google.cloud.automl_v1beta1.proto import service_pb2 -from google.cloud.automl_v1beta1.proto import table_spec_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestAutoMlClient(object): - def test_create_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - response = client.create_dataset(parent, dataset) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.CreateDatasetRequest( - parent=parent, dataset=dataset - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - with pytest.raises(CustomException): - client.create_dataset(parent, dataset) - - def test_get_dataset(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name_2, - "display_name": display_name, - "description": description, - "example_count": 
example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.get_dataset(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - with pytest.raises(CustomException): - client.get_dataset(name) - - def test_list_datasets(self): - # Setup Expected Response - next_page_token = "" - datasets_element = {} - datasets = [datasets_element] - expected_response = {"next_page_token": next_page_token, "datasets": datasets} - expected_response = service_pb2.ListDatasetsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.datasets[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListDatasetsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_datasets_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - dataset = {} - - response = client.update_dataset(dataset) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = 
service_pb2.UpdateDatasetRequest(dataset=dataset) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - dataset = {} - - with pytest.raises(CustomException): - client.update_dataset(dataset) - - def test_delete_dataset(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_dataset", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.delete_dataset(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_dataset_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_dataset_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.delete_dataset(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_import_data(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_import_data", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - input_config = {} - - response = client.import_data(name, input_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ImportDataRequest( - name=name, input_config=input_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_import_data_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_import_data_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - input_config = {} - - response = client.import_data(name, input_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_export_data(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_data", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - output_config = {} - - response = client.export_data(name, output_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ExportDataRequest( - name=name, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_data_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_data_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - output_config = {} - - response = client.export_data(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_annotation_spec(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - example_count = 1517063674 - expected_response = { - "name": name_2, - "display_name": display_name, - "example_count": example_count, - } - expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.annotation_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]" - ) - - response = client.get_annotation_spec(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetAnnotationSpecRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_annotation_spec_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.annotation_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]" - ) - - with pytest.raises(CustomException): - 
client.get_annotation_spec(name) - - def test_get_table_spec(self): - # Setup Expected Response - name_2 = "name2-1052831874" - time_column_spec_id = "timeColumnSpecId1558734824" - row_count = 1340416618 - valid_row_count = 406068761 - column_count = 122671386 - etag = "etag3123477" - expected_response = { - "name": name_2, - "time_column_spec_id": time_column_spec_id, - "row_count": row_count, - "valid_row_count": valid_row_count, - "column_count": column_count, - "etag": etag, - } - expected_response = table_spec_pb2.TableSpec(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.table_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" - ) - - response = client.get_table_spec(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetTableSpecRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_table_spec_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.table_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" - ) - - with pytest.raises(CustomException): - client.get_table_spec(name) - - def test_list_table_specs(self): - # Setup Expected Response - next_page_token = "" - table_specs_element = {} - table_specs = [table_specs_element] - expected_response = { - "next_page_token": next_page_token, - "table_specs": table_specs, - } - expected_response = service_pb2.ListTableSpecsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - paged_list_response = client.list_table_specs(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.table_specs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListTableSpecsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_table_specs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - paged_list_response = client.list_table_specs(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_table_spec(self): - # Setup Expected Response - name = "name3373707" - time_column_spec_id = "timeColumnSpecId1558734824" - row_count = 1340416618 - valid_row_count = 406068761 - column_count = 122671386 - etag = "etag3123477" - expected_response = { - "name": name, - 
"time_column_spec_id": time_column_spec_id, - "row_count": row_count, - "valid_row_count": valid_row_count, - "column_count": column_count, - "etag": etag, - } - expected_response = table_spec_pb2.TableSpec(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - table_spec = {} - - response = client.update_table_spec(table_spec) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateTableSpecRequest(table_spec=table_spec) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_table_spec_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - table_spec = {} - - with pytest.raises(CustomException): - client.update_table_spec(table_spec) - - def test_get_column_spec(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - etag = "etag3123477" - expected_response = {"name": name_2, "display_name": display_name, "etag": etag} - expected_response = column_spec_pb2.ColumnSpec(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.column_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]" - ) - - response = client.get_column_spec(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetColumnSpecRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_column_spec_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.column_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]" - ) - - with pytest.raises(CustomException): - client.get_column_spec(name) - - def test_list_column_specs(self): - # Setup Expected Response - next_page_token = "" - column_specs_element = {} - column_specs = [column_specs_element] - expected_response = { - "next_page_token": next_page_token, - "column_specs": column_specs, - } - expected_response = service_pb2.ListColumnSpecsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.table_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" - ) - - paged_list_response = client.list_column_specs(parent) - resources = 
list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.column_specs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListColumnSpecsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_column_specs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.table_spec_path( - "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" - ) - - paged_list_response = client.list_column_specs(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_column_spec(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - etag = "etag3123477" - expected_response = {"name": name, "display_name": display_name, "etag": etag} - expected_response = column_spec_pb2.ColumnSpec(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - column_spec = {} - - response = client.update_column_spec(column_spec) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateColumnSpecRequest(column_spec=column_spec) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_column_spec_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - column_spec = {} - - with pytest.raises(CustomException): - client.update_column_spec(column_spec) - - def test_create_model(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - expected_response = { - "name": name, - "display_name": display_name, - "dataset_id": dataset_id, - } - expected_response = model_pb2.Model(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} - - response = client.create_model(parent, model) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.CreateModelRequest(parent=parent, model=model) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the 
API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} - - response = client.create_model(parent, model) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_model(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - expected_response = { - "name": name_2, - "display_name": display_name, - "dataset_id": dataset_id, - } - expected_response = model_pb2.Model(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.get_model(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_model_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - with pytest.raises(CustomException): - client.get_model(name) - - def test_list_models(self): - # Setup Expected Response - next_page_token = "" - model_element = {} - model = [model_element] - expected_response = {"next_page_token": next_page_token, "model": model} - expected_response = service_pb2.ListModelsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_models(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.model[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_models_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_models(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True - ) 
- operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.delete_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.delete_model(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_deploy_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_deploy_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.deploy_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeployModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_deploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_deploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.deploy_model(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_undeploy_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_undeploy_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = 
client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.undeploy_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.UndeployModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_undeploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_undeploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.undeploy_model(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_export_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_model(name, output_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ExportModelRequest( - name=name, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_model(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_export_evaluated_examples(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_evaluated_examples(name, output_config) - result = response.result() - assert expected_response == 
result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_evaluated_examples_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_evaluated_examples(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_model_evaluation(self): - # Setup Expected Response - name_2 = "name2-1052831874" - annotation_spec_id = "annotationSpecId60690191" - display_name = "displayName1615086568" - evaluated_example_count = 277565350 - expected_response = { - "name": name_2, - "annotation_spec_id": annotation_spec_id, - "display_name": display_name, - "evaluated_example_count": evaluated_example_count, - } - expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) - - response = client.get_model_evaluation(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelEvaluationRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_model_evaluation_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) - - with pytest.raises(CustomException): - client.get_model_evaluation(name) - - def test_list_model_evaluations(self): - # Setup Expected Response - next_page_token = "" - model_evaluation_element = {} - model_evaluation = [model_evaluation_element] - expected_response = { - "next_page_token": next_page_token, - "model_evaluation": model_evaluation, - } - expected_response = service_pb2.ListModelEvaluationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - paged_list_response = client.list_model_evaluations(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert 
expected_response.model_evaluation[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_model_evaluations_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - paged_list_response = client.list_model_evaluations(parent) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py deleted file mode 100644 index c83504a4..00000000 --- a/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py +++ /dev/null @@ -1,170 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import automl_v1beta1 -from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import prediction_service_pb2 -from google.longrunning import operations_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestPredictionServiceClient(object): - def test_predict(self): - # Setup Expected Response - expected_response = {} - expected_response = prediction_service_pb2.PredictResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.PredictionServiceClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - payload = {} - - response = client.predict(name, payload) - assert 
expected_response == response - - assert len(channel.requests) == 1 - expected_request = prediction_service_pb2.PredictRequest( - name=name, payload=payload - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_predict_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.PredictionServiceClient() - - # Setup request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - payload = {} - - with pytest.raises(CustomException): - client.predict(name, payload) - - def test_batch_predict(self): - # Setup Expected Response - expected_response = {} - expected_response = prediction_service_pb2.BatchPredictResult( - **expected_response - ) - operation = operations_pb2.Operation( - name="operations/test_batch_predict", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.PredictionServiceClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - input_config = {} - output_config = {} - params = {} - - response = client.batch_predict(name, input_config, output_config, params) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = prediction_service_pb2.BatchPredictRequest( - name=name, - input_config=input_config, - output_config=output_config, - params=params, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_batch_predict_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_batch_predict_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.PredictionServiceClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - input_config = {} - output_config = {} - params = {} - - response = client.batch_predict(name, input_config, output_config, params) - exception = response.exception() - assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py b/tests/unit/test_gcs_client_v1beta1.py similarity index 97% rename from tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py rename to tests/unit/test_gcs_client_v1beta1.py index 222fca32..306bb61f 100644 --- a/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py +++ b/tests/unit/test_gcs_client_v1beta1.py @@ -31,9 +31,7 @@ class TestGcsClient(object): def gcs_client(self, bucket_name=None, client_attrs={}): client_mock = mock.Mock(**client_attrs) - return automl_v1beta1.tables.gcs_client.GcsClient( - bucket_name=bucket_name, client=client_mock - ) + return automl_v1beta1.GcsClient(bucket_name=bucket_name, client=client_mock) def test_init_with_project_and_credentials(self): # helper for checking that the storage client is initialized with the @@ -46,7 +44,7 @@ def __init__(self, project=None, 
credentials=None): patch = mock.patch("google.cloud.storage.Client", new=FakeStorageClient) with patch: credentials = AnonymousCredentials() - gcs_client = automl_v1beta1.tables.gcs_client.GcsClient( + gcs_client = automl_v1beta1.GcsClient( project=PROJECT, credentials=credentials ) assert isinstance(gcs_client.client, FakeStorageClient) diff --git a/tests/unit/gapic/v1beta1/test_tables_client_v1beta1.py b/tests/unit/test_tables_client_v1beta1.py similarity index 68% rename from tests/unit/gapic/v1beta1/test_tables_client_v1beta1.py rename to tests/unit/test_tables_client_v1beta1.py index 61f39a98..1d5b168c 100644 --- a/tests/unit/gapic/v1beta1/test_tables_client_v1beta1.py +++ b/tests/unit/test_tables_client_v1beta1.py @@ -23,8 +23,8 @@ from google.api_core import exceptions from google.auth.credentials import AnonymousCredentials from google.cloud import automl_v1beta1 -from google.cloud.automl_v1beta1.proto import data_types_pb2, data_items_pb2 -from google.protobuf import struct_pb2 +from google.cloud.automl_v1beta1.types import data_types, data_items +from google.protobuf import struct_pb2 as struct PROJECT = "project" REGION = "region" @@ -48,44 +48,47 @@ def tables_client( def test_list_datasets_empty(self): client = self.tables_client( - { + client_attrs={ "list_datasets.return_value": [], "location_path.return_value": LOCATION_PATH, }, - {}, + prediction_client_attrs={}, ) ds = client.list_datasets() - client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION) - client.auto_ml_client.list_datasets.assert_called_with(LOCATION_PATH) + + request = automl_v1beta1.ListDatasetsRequest(parent=LOCATION_PATH) + client.auto_ml_client.list_datasets.assert_called_with(request=request) assert ds == [] def test_list_datasets_not_empty(self): datasets = ["some_dataset"] client = self.tables_client( - { + client_attrs={ "list_datasets.return_value": datasets, "location_path.return_value": LOCATION_PATH, }, - {}, + prediction_client_attrs={}, ) ds = client.list_datasets() - client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION) - client.auto_ml_client.list_datasets.assert_called_with(LOCATION_PATH) + + request = automl_v1beta1.ListDatasetsRequest(parent=LOCATION_PATH) + client.auto_ml_client.list_datasets.assert_called_with(request=request) assert len(ds) == 1 assert ds[0] == "some_dataset" def test_get_dataset_no_value(self): - dataset_actual = "dataset" client = self.tables_client({}, {}) with pytest.raises(ValueError): - dataset = client.get_dataset() + client.get_dataset() client.auto_ml_client.get_dataset.assert_not_called() def test_get_dataset_name(self): dataset_actual = "dataset" client = self.tables_client({"get_dataset.return_value": dataset_actual}, {}) dataset = client.get_dataset(dataset_name="my_dataset") - client.auto_ml_client.get_dataset.assert_called_with("my_dataset") + client.auto_ml_client.get_dataset.assert_called_with( + request=automl_v1beta1.GetDatasetRequest(name="my_dataset") + ) assert dataset == dataset_actual def test_get_no_dataset(self): @@ -94,7 +97,9 @@ def test_get_no_dataset(self): ) with pytest.raises(exceptions.NotFound): client.get_dataset(dataset_name="my_dataset") - client.auto_ml_client.get_dataset.assert_called_with("my_dataset") + client.auto_ml_client.get_dataset.assert_called_with( + request=automl_v1beta1.GetDatasetRequest(name="my_dataset") + ) def test_get_dataset_from_empty_list(self): client = self.tables_client({"list_datasets.return_value": []}, {}) @@ -143,11 +148,14 @@ def test_create_dataset(self): }, {}, ) - 
metadata = {"metadata": "values"} + metadata = {"primary_table_spec_id": "1234"} dataset = client.create_dataset("name", metadata=metadata) - client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION) + client.auto_ml_client.create_dataset.assert_called_with( - LOCATION_PATH, {"display_name": "name", "tables_dataset_metadata": metadata} + request=automl_v1beta1.CreateDatasetRequest( + parent=LOCATION_PATH, + dataset={"display_name": "name", "tables_dataset_metadata": metadata}, + ) ) assert dataset.display_name == "name" @@ -156,7 +164,9 @@ def test_delete_dataset(self): dataset.configure_mock(name="name") client = self.tables_client({"delete_dataset.return_value": None}, {}) client.delete_dataset(dataset=dataset) - client.auto_ml_client.delete_dataset.assert_called_with("name") + client.auto_ml_client.delete_dataset.assert_called_with( + request=automl_v1beta1.DeleteDatasetRequest(name="name") + ) def test_delete_dataset_not_found(self): client = self.tables_client({"list_datasets.return_value": []}, {}) @@ -166,7 +176,9 @@ def test_delete_dataset_not_found(self): def test_delete_dataset_name(self): client = self.tables_client({"delete_dataset.return_value": None}, {}) client.delete_dataset(dataset_name="name") - client.auto_ml_client.delete_dataset.assert_called_with("name") + client.auto_ml_client.delete_dataset.assert_called_with( + request=automl_v1beta1.DeleteDatasetRequest(name="name") + ) def test_export_not_found(self): client = self.tables_client({"list_datasets.return_value": []}, {}) @@ -179,14 +191,20 @@ def test_export_gcs_uri(self): client = self.tables_client({"export_data.return_value": None}, {}) client.export_data(dataset_name="name", gcs_output_uri_prefix="uri") client.auto_ml_client.export_data.assert_called_with( - "name", {"gcs_destination": {"output_uri_prefix": "uri"}} + request=automl_v1beta1.ExportDataRequest( + name="name", + output_config={"gcs_destination": {"output_uri_prefix": "uri"}}, + ) ) def test_export_bq_uri(self): client = self.tables_client({"export_data.return_value": None}, {}) client.export_data(dataset_name="name", bigquery_output_uri="uri") client.auto_ml_client.export_data.assert_called_with( - "name", {"bigquery_destination": {"output_uri": "uri"}} + request=automl_v1beta1.ExportDataRequest( + name="name", + output_config={"bigquery_destination": {"output_uri": "uri"}}, + ) ) def test_import_not_found(self): @@ -213,7 +231,9 @@ def test_import_pandas_dataframe(self): client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION) client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe) client.auto_ml_client.import_data.assert_called_with( - "name", {"gcs_source": {"input_uris": ["uri"]}} + request=automl_v1beta1.ImportDataRequest( + name="name", input_config={"gcs_source": {"input_uris": ["uri"]}} + ) ) def test_import_pandas_dataframe_init_gcs(self): @@ -227,7 +247,7 @@ def test_import_pandas_dataframe_init_gcs(self): dataframe = pandas.DataFrame({}) patch = mock.patch( - "google.cloud.automl_v1beta1.tables.tables_client.gcs_client.GcsClient", + "google.cloud.automl_v1beta1.services.tables.tables_client.gcs_client.GcsClient", bucket_name="my_bucket", ) with patch as MockGcsClient: @@ -240,34 +260,44 @@ def test_import_pandas_dataframe_init_gcs(self): client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION) client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe) client.auto_ml_client.import_data.assert_called_with( - "name", {"gcs_source": {"input_uris": ["uri"]}} + 
request=automl_v1beta1.ImportDataRequest( + name="name", input_config={"gcs_source": {"input_uris": ["uri"]}} + ) ) def test_import_gcs_uri(self): client = self.tables_client({"import_data.return_value": None}, {}) client.import_data(dataset_name="name", gcs_input_uris="uri") client.auto_ml_client.import_data.assert_called_with( - "name", {"gcs_source": {"input_uris": ["uri"]}} + request=automl_v1beta1.ImportDataRequest( + name="name", input_config={"gcs_source": {"input_uris": ["uri"]}} + ) ) def test_import_gcs_uris(self): client = self.tables_client({"import_data.return_value": None}, {}) client.import_data(dataset_name="name", gcs_input_uris=["uri", "uri"]) client.auto_ml_client.import_data.assert_called_with( - "name", {"gcs_source": {"input_uris": ["uri", "uri"]}} + request=automl_v1beta1.ImportDataRequest( + name="name", input_config={"gcs_source": {"input_uris": ["uri", "uri"]}} + ) ) def test_import_bq_uri(self): client = self.tables_client({"import_data.return_value": None}, {}) client.import_data(dataset_name="name", bigquery_input_uri="uri") client.auto_ml_client.import_data.assert_called_with( - "name", {"bigquery_source": {"input_uri": "uri"}} + request=automl_v1beta1.ImportDataRequest( + name="name", input_config={"bigquery_source": {"input_uri": "uri"}} + ) ) def test_list_table_specs(self): client = self.tables_client({"list_table_specs.return_value": None}, {}) client.list_table_specs(dataset_name="name") - client.auto_ml_client.list_table_specs.assert_called_with("name") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) def test_list_table_specs_not_found(self): client = self.tables_client( @@ -275,17 +305,23 @@ def test_list_table_specs_not_found(self): ) with pytest.raises(exceptions.NotFound): client.list_table_specs(dataset_name="name") - client.auto_ml_client.list_table_specs.assert_called_with("name") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) def test_get_table_spec(self): client = self.tables_client({}, {}) client.get_table_spec("name") - client.auto_ml_client.get_table_spec.assert_called_with("name") + client.auto_ml_client.get_table_spec.assert_called_with( + request=automl_v1beta1.GetTableSpecRequest(name="name") + ) def test_get_column_spec(self): client = self.tables_client({}, {}) client.get_column_spec("name") - client.auto_ml_client.get_column_spec.assert_called_with("name") + client.auto_ml_client.get_column_spec.assert_called_with( + request=automl_v1beta1.GetColumnSpecRequest(name="name") + ) def test_list_column_specs(self): table_spec_mock = mock.Mock() @@ -299,171 +335,238 @@ def test_list_column_specs(self): {}, ) client.list_column_specs(dataset_name="name") - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) def test_update_column_spec_not_found(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", 
data_type=data_type_mock + + column_spec = automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType(type_code=automl_v1beta1.TypeCode.STRING), ) + client = self.tables_client( - { + client_attrs={ "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, - {}, + prediction_client_attrs={}, ) with pytest.raises(exceptions.NotFound): client.update_column_spec(dataset_name="name", column_spec_name="column2") - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_column_spec.assert_not_called() def test_update_column_spec_display_name_not_found(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", data_type=data_type_mock + + column_spec = automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType(type_code=automl_v1beta1.TypeCode.STRING), ) client = self.tables_client( - { + client_attrs={ "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, - {}, + prediction_client_attrs={}, ) with pytest.raises(exceptions.NotFound): client.update_column_spec( dataset_name="name", column_spec_display_name="column2" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_column_spec.assert_not_called() def test_update_column_spec_name_no_args(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column/2", display_name="column", data_type=data_type_mock + + column_spec = automl_v1beta1.ColumnSpec( + name="column/2", + display_name="column", + data_type=automl_v1beta1.DataType( + type_code=automl_v1beta1.TypeCode.FLOAT64 + ), ) + client = self.tables_client( { "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, {}, ) client.update_column_spec(dataset_name="name", column_spec_name="column/2") - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + 
request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_column_spec.assert_called_with( - {"name": "column/2", "data_type": {"type_code": "type_code"}} + request=automl_v1beta1.UpdateColumnSpecRequest( + column_spec={ + "name": "column/2", + "data_type": {"type_code": automl_v1beta1.TypeCode.FLOAT64}, + } + ) ) def test_update_column_spec_no_args(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", data_type=data_type_mock + + column_spec = automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType( + type_code=automl_v1beta1.TypeCode.FLOAT64 + ), ) + client = self.tables_client( { "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, {}, ) client.update_column_spec( dataset_name="name", column_spec_display_name="column" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_column_spec.assert_called_with( - {"name": "column", "data_type": {"type_code": "type_code"}} + request=automl_v1beta1.UpdateColumnSpecRequest( + column_spec={ + "name": "column", + "data_type": {"type_code": automl_v1beta1.TypeCode.FLOAT64}, + } + ) ) def test_update_column_spec_nullable(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", data_type=data_type_mock + + column_spec = automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType( + type_code=automl_v1beta1.TypeCode.FLOAT64 + ), ) + client = self.tables_client( { "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, {}, ) client.update_column_spec( dataset_name="name", column_spec_display_name="column", nullable=True ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_column_spec.assert_called_with( - { - "name": "column", - "data_type": {"type_code": "type_code", "nullable": True}, - } + request=automl_v1beta1.UpdateColumnSpecRequest( + column_spec={ + "name": "column", + "data_type": { + "type_code": automl_v1beta1.TypeCode.FLOAT64, + "nullable": True, + }, + } + ) ) def test_update_column_spec_type_code(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be 
passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", data_type=data_type_mock + column_spec = automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType( + type_code=automl_v1beta1.TypeCode.FLOAT64 + ), ) client = self.tables_client( { "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, {}, ) client.update_column_spec( dataset_name="name", column_spec_display_name="column", - type_code="type_code2", + type_code=automl_v1beta1.TypeCode.ARRAY, + ) + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") client.auto_ml_client.update_column_spec.assert_called_with( - {"name": "column", "data_type": {"type_code": "type_code2"}} + request=automl_v1beta1.UpdateColumnSpecRequest( + column_spec={ + "name": "column", + "data_type": {"type_code": automl_v1beta1.TypeCode.ARRAY}, + } + ) ) def test_update_column_spec_type_code_nullable(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", data_type=data_type_mock + column_spec = automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType( + type_code=automl_v1beta1.TypeCode.FLOAT64 + ), ) client = self.tables_client( { "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, {}, ) @@ -471,30 +574,41 @@ def test_update_column_spec_type_code_nullable(self): dataset_name="name", nullable=True, column_spec_display_name="column", - type_code="type_code2", + type_code=automl_v1beta1.TypeCode.ARRAY, + ) + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") client.auto_ml_client.update_column_spec.assert_called_with( - { - "name": "column", - "data_type": {"type_code": "type_code2", "nullable": True}, - } + request=automl_v1beta1.UpdateColumnSpecRequest( + column_spec={ + "name": "column", + "data_type": { + "type_code": automl_v1beta1.TypeCode.ARRAY, + "nullable": True, + }, + } + ) ) def test_update_column_spec_type_code_nullable_false(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock = mock.Mock() - data_type_mock = mock.Mock(type_code="type_code") - column_spec_mock.configure_mock( - name="column", display_name="column", data_type=data_type_mock + column_spec = 
automl_v1beta1.ColumnSpec( + name="column", + display_name="column", + data_type=automl_v1beta1.DataType( + type_code=automl_v1beta1.TypeCode.FLOAT64 + ), ) client = self.tables_client( { "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [column_spec_mock], + "list_column_specs.return_value": [column_spec], }, {}, ) @@ -502,15 +616,24 @@ def test_update_column_spec_type_code_nullable_false(self): dataset_name="name", nullable=False, column_spec_display_name="column", - type_code="type_code2", + type_code=automl_v1beta1.TypeCode.FLOAT64, + ) + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") client.auto_ml_client.update_column_spec.assert_called_with( - { - "name": "column", - "data_type": {"type_code": "type_code2", "nullable": False}, - } + request=automl_v1beta1.UpdateColumnSpecRequest( + column_spec={ + "name": "column", + "data_type": { + "type_code": automl_v1beta1.TypeCode.FLOAT64, + "nullable": False, + }, + } + ) ) def test_set_target_column_table_not_found(self): @@ -521,7 +644,9 @@ def test_set_target_column_table_not_found(self): client.set_target_column( dataset_name="name", column_spec_display_name="column2" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) client.auto_ml_client.list_column_specs.assert_not_called() client.auto_ml_client.update_dataset.assert_not_called() @@ -542,8 +667,12 @@ def test_set_target_column_not_found(self): client.set_target_column( dataset_name="name", column_spec_display_name="column2" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_dataset.assert_not_called() def test_set_target_column(self): @@ -571,17 +700,23 @@ def test_set_target_column(self): {}, ) client.set_target_column(dataset_name="name", column_spec_display_name="column") - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_dataset.assert_called_with( - { - "name": "dataset", - "tables_dataset_metadata": { - "target_column_spec_id": "1", - "weight_column_spec_id": "2", - "ml_use_column_spec_id": "3", - }, - } + request=automl_v1beta1.UpdateDatasetRequest( + dataset={ + "name": "dataset", + "tables_dataset_metadata": { + "target_column_spec_id": "1", + "weight_column_spec_id": "2", + "ml_use_column_spec_id": "3", + }, + } + ) ) def test_set_weight_column_table_not_found(self): @@ -594,7 +729,9 @@ def 
test_set_weight_column_table_not_found(self): ) except exceptions.NotFound: pass - client.auto_ml_client.list_table_specs.assert_called_with("name") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) client.auto_ml_client.list_column_specs.assert_not_called() client.auto_ml_client.update_dataset.assert_not_called() @@ -615,8 +752,12 @@ def test_set_weight_column_not_found(self): client.set_weight_column( dataset_name="name", column_spec_display_name="column2" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_dataset.assert_not_called() def test_set_weight_column(self): @@ -644,17 +785,23 @@ def test_set_weight_column(self): {}, ) client.set_weight_column(dataset_name="name", column_spec_display_name="column") - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_dataset.assert_called_with( - { - "name": "dataset", - "tables_dataset_metadata": { - "target_column_spec_id": "1", - "weight_column_spec_id": "2", - "ml_use_column_spec_id": "3", - }, - } + request=automl_v1beta1.UpdateDatasetRequest( + dataset={ + "name": "dataset", + "tables_dataset_metadata": { + "target_column_spec_id": "1", + "weight_column_spec_id": "2", + "ml_use_column_spec_id": "3", + }, + } + ) ) def test_clear_weight_column(self): @@ -671,14 +818,16 @@ def test_clear_weight_column(self): client = self.tables_client({"get_dataset.return_value": dataset_mock}, {}) client.clear_weight_column(dataset_name="name") client.auto_ml_client.update_dataset.assert_called_with( - { - "name": "dataset", - "tables_dataset_metadata": { - "target_column_spec_id": "1", - "weight_column_spec_id": None, - "ml_use_column_spec_id": "3", - }, - } + request=automl_v1beta1.UpdateDatasetRequest( + dataset={ + "name": "dataset", + "tables_dataset_metadata": { + "target_column_spec_id": "1", + "weight_column_spec_id": None, + "ml_use_column_spec_id": "3", + }, + } + ) ) def test_set_test_train_column_table_not_found(self): @@ -689,7 +838,9 @@ def test_set_test_train_column_table_not_found(self): client.set_test_train_column( dataset_name="name", column_spec_display_name="column2" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) client.auto_ml_client.list_column_specs.assert_not_called() client.auto_ml_client.update_dataset.assert_not_called() @@ -710,8 +861,12 @@ def test_set_test_train_column_not_found(self): client.set_test_train_column( dataset_name="name", column_spec_display_name="column2" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + 
request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_dataset.assert_not_called() def test_set_test_train_column(self): @@ -741,17 +896,23 @@ def test_set_test_train_column(self): client.set_test_train_column( dataset_name="name", column_spec_display_name="column" ) - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_dataset.assert_called_with( - { - "name": "dataset", - "tables_dataset_metadata": { - "target_column_spec_id": "1", - "weight_column_spec_id": "2", - "ml_use_column_spec_id": "3", - }, - } + request=automl_v1beta1.UpdateDatasetRequest( + dataset={ + "name": "dataset", + "tables_dataset_metadata": { + "target_column_spec_id": "1", + "weight_column_spec_id": "2", + "ml_use_column_spec_id": "3", + }, + } + ) ) def test_clear_test_train_column(self): @@ -768,14 +929,16 @@ def test_clear_test_train_column(self): client = self.tables_client({"get_dataset.return_value": dataset_mock}, {}) client.clear_test_train_column(dataset_name="name") client.auto_ml_client.update_dataset.assert_called_with( - { - "name": "dataset", - "tables_dataset_metadata": { - "target_column_spec_id": "1", - "weight_column_spec_id": "2", - "ml_use_column_spec_id": None, - }, - } + request=automl_v1beta1.UpdateDatasetRequest( + dataset={ + "name": "dataset", + "tables_dataset_metadata": { + "target_column_spec_id": "1", + "weight_column_spec_id": "2", + "ml_use_column_spec_id": None, + }, + } + ) ) def test_set_time_column(self): @@ -795,10 +958,16 @@ def test_set_time_column(self): {}, ) client.set_time_column(dataset_name="name", column_spec_display_name="column") - client.auto_ml_client.list_table_specs.assert_called_with("name") - client.auto_ml_client.list_column_specs.assert_called_with("table") + client.auto_ml_client.list_table_specs.assert_called_with( + request=automl_v1beta1.ListTableSpecsRequest(parent="name") + ) + client.auto_ml_client.list_column_specs.assert_called_with( + request=automl_v1beta1.ListColumnSpecsRequest(parent="table") + ) client.auto_ml_client.update_table_spec.assert_called_with( - {"name": "table", "time_column_spec_id": "3"} + request=automl_v1beta1.UpdateTableSpecRequest( + table_spec={"name": "table", "time_column_spec_id": "3"} + ) ) def test_clear_time_column(self): @@ -816,18 +985,24 @@ def test_clear_time_column(self): ) client.clear_time_column(dataset_name="name") client.auto_ml_client.update_table_spec.assert_called_with( - {"name": "table", "time_column_spec_id": None} + request=automl_v1beta1.UpdateTableSpecRequest( + table_spec={"name": "table", "time_column_spec_id": None} + ) ) def test_get_model_evaluation(self): client = self.tables_client({}, {}) - ds = client.get_model_evaluation(model_evaluation_name="x") - client.auto_ml_client.get_model_evaluation.assert_called_with("x") + client.get_model_evaluation(model_evaluation_name="x") + client.auto_ml_client.get_model_evaluation.assert_called_with( + request=automl_v1beta1.GetModelEvaluationRequest(name="x") + ) def test_list_model_evaluations_empty(self): client = 
self.tables_client({"list_model_evaluations.return_value": []}, {}) ds = client.list_model_evaluations(model_name="model") - client.auto_ml_client.list_model_evaluations.assert_called_with("model") + client.auto_ml_client.list_model_evaluations.assert_called_with( + request=automl_v1beta1.ListModelEvaluationsRequest(parent="model") + ) assert ds == [] def test_list_model_evaluations_not_empty(self): @@ -840,7 +1015,9 @@ def test_list_model_evaluations_not_empty(self): {}, ) ds = client.list_model_evaluations(model_name="model") - client.auto_ml_client.list_model_evaluations.assert_called_with("model") + client.auto_ml_client.list_model_evaluations.assert_called_with( + request=automl_v1beta1.ListModelEvaluationsRequest(parent="model") + ) assert len(ds) == 1 assert ds[0] == "eval" @@ -853,8 +1030,10 @@ def test_list_models_empty(self): {}, ) ds = client.list_models() - client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION) - client.auto_ml_client.list_models.assert_called_with(LOCATION_PATH) + + client.auto_ml_client.list_models.assert_called_with( + request=automl_v1beta1.ListModelsRequest(parent=LOCATION_PATH) + ) assert ds == [] def test_list_models_not_empty(self): @@ -867,8 +1046,10 @@ def test_list_models_not_empty(self): {}, ) ds = client.list_models() - client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION) - client.auto_ml_client.list_models.assert_called_with(LOCATION_PATH) + + client.auto_ml_client.list_models.assert_called_with( + request=automl_v1beta1.ListModelsRequest(parent=LOCATION_PATH) + ) assert len(ds) == 1 assert ds[0] == "some_model" @@ -876,7 +1057,7 @@ def test_get_model_name(self): model_actual = "model" client = self.tables_client({"get_model.return_value": model_actual}, {}) model = client.get_model(model_name="my_model") - client.auto_ml_client.get_model.assert_called_with("my_model") + client.auto_ml_client.get_model.assert_called_with(name="my_model") assert model == model_actual def test_get_no_model(self): @@ -885,7 +1066,7 @@ def test_get_no_model(self): ) with pytest.raises(exceptions.NotFound): client.get_model(model_name="my_model") - client.auto_ml_client.get_model.assert_called_with("my_model") + client.auto_ml_client.get_model.assert_called_with(name="my_model") def test_get_model_from_empty_list(self): client = self.tables_client({"list_models.return_value": []}, {}) @@ -931,7 +1112,9 @@ def test_delete_model(self): model.configure_mock(name="name") client = self.tables_client({"delete_model.return_value": None}, {}) client.delete_model(model=model) - client.auto_ml_client.delete_model.assert_called_with("name") + client.auto_ml_client.delete_model.assert_called_with( + request=automl_v1beta1.DeleteModelRequest(name="name") + ) def test_delete_model_not_found(self): client = self.tables_client({"list_models.return_value": []}, {}) @@ -941,7 +1124,9 @@ def test_delete_model_not_found(self): def test_delete_model_name(self): client = self.tables_client({"delete_model.return_value": None}, {}) client.delete_model(model_name="name") - client.auto_ml_client.delete_model.assert_called_with("name") + client.auto_ml_client.delete_model.assert_called_with( + request=automl_v1beta1.DeleteModelRequest(name="name") + ) def test_deploy_model_no_args(self): client = self.tables_client({}, {}) @@ -952,7 +1137,9 @@ def test_deploy_model_no_args(self): def test_deploy_model(self): client = self.tables_client({}, {}) client.deploy_model(model_name="name") - client.auto_ml_client.deploy_model.assert_called_with("name") + 
client.auto_ml_client.deploy_model.assert_called_with( + request=automl_v1beta1.DeployModelRequest(name="name") + ) def test_deploy_model_not_found(self): client = self.tables_client({"list_models.return_value": []}, {}) @@ -963,7 +1150,9 @@ def test_deploy_model_not_found(self): def test_undeploy_model(self): client = self.tables_client({}, {}) client.undeploy_model(model_name="name") - client.auto_ml_client.undeploy_model.assert_called_with("name") + client.auto_ml_client.undeploy_model.assert_called_with( + request=automl_v1beta1.UndeployModelRequest(name="name") + ) def test_undeploy_model_not_found(self): client = self.tables_client({"list_models.return_value": []}, {}) @@ -989,32 +1178,37 @@ def test_create_model(self): "my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1000 ) client.auto_ml_client.create_model.assert_called_with( - LOCATION_PATH, - { - "display_name": "my_model", - "dataset_id": "my_dataset", - "tables_model_metadata": {"train_budget_milli_node_hours": 1000}, - }, + request=automl_v1beta1.CreateModelRequest( + parent=LOCATION_PATH, + model={ + "display_name": "my_model", + "dataset_id": "my_dataset", + "tables_model_metadata": {"train_budget_milli_node_hours": 1000}, + }, + ) ) def test_create_model_include_columns(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock1 = mock.Mock() - column_spec_mock1.configure_mock(name="column/1", display_name="column1") - column_spec_mock2 = mock.Mock() - column_spec_mock2.configure_mock(name="column/2", display_name="column2") + + column_spec_1 = automl_v1beta1.ColumnSpec( + name="column/1", display_name="column1" + ) + column_spec_2 = automl_v1beta1.ColumnSpec( + name="column/2", display_name="column2" + ) + client = self.tables_client( - { - "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [ - column_spec_mock1, - column_spec_mock2, + client_attrs={ + "list_table_specs.return_value": [ + automl_v1beta1.TableSpec(name="table") ], + "list_column_specs.return_value": [column_spec_1, column_spec_2], "location_path.return_value": LOCATION_PATH, }, - {}, + prediction_client_attrs={}, ) client.create_model( "my_model", @@ -1023,35 +1217,37 @@ def test_create_model_include_columns(self): train_budget_milli_node_hours=1000, ) client.auto_ml_client.create_model.assert_called_with( - LOCATION_PATH, - { - "display_name": "my_model", - "dataset_id": "my_dataset", - "tables_model_metadata": { - "train_budget_milli_node_hours": 1000, - "input_feature_column_specs": [column_spec_mock1], - }, - }, + request=automl_v1beta1.CreateModelRequest( + parent=LOCATION_PATH, + model=automl_v1beta1.Model( + display_name="my_model", + dataset_id="my_dataset", + tables_model_metadata=automl_v1beta1.TablesModelMetadata( + train_budget_milli_node_hours=1000, + input_feature_column_specs=[column_spec_1], + ), + ), + ) ) def test_create_model_exclude_columns(self): table_spec_mock = mock.Mock() # name is reserved in use of __init__, needs to be passed here table_spec_mock.configure_mock(name="table") - column_spec_mock1 = mock.Mock() - column_spec_mock1.configure_mock(name="column/1", display_name="column1") - column_spec_mock2 = mock.Mock() - column_spec_mock2.configure_mock(name="column/2", display_name="column2") + + column_spec_1 = automl_v1beta1.ColumnSpec( + name="column/1", display_name="column1" + ) + column_spec_2 = automl_v1beta1.ColumnSpec( + name="column/2", display_name="column2" + 
) client = self.tables_client( - { + client_attrs={ "list_table_specs.return_value": [table_spec_mock], - "list_column_specs.return_value": [ - column_spec_mock1, - column_spec_mock2, - ], + "list_column_specs.return_value": [column_spec_1, column_spec_2], "location_path.return_value": LOCATION_PATH, }, - {}, + prediction_client_attrs={}, ) client.create_model( "my_model", @@ -1060,15 +1256,17 @@ def test_create_model_exclude_columns(self): train_budget_milli_node_hours=1000, ) client.auto_ml_client.create_model.assert_called_with( - LOCATION_PATH, - { - "display_name": "my_model", - "dataset_id": "my_dataset", - "tables_model_metadata": { - "train_budget_milli_node_hours": 1000, - "input_feature_column_specs": [column_spec_mock2], - }, - }, + request=automl_v1beta1.CreateModelRequest( + parent=LOCATION_PATH, + model=automl_v1beta1.Model( + display_name="my_model", + dataset_id="my_dataset", + tables_model_metadata=automl_v1beta1.TablesModelMetadata( + train_budget_milli_node_hours=1000, + input_feature_column_specs=[column_spec_2], + ), + ), + ) ) def test_create_model_invalid_hours_small(self): @@ -1110,20 +1308,28 @@ def test_create_model_invalid_include_exclude(self): client.auto_ml_client.create_model.assert_not_called() def test_predict_from_array(self): - data_type = mock.Mock(type_code=data_types_pb2.CATEGORY) + data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY) column_spec = mock.Mock(display_name="a", data_type=data_type) model_metadata = mock.Mock(input_feature_column_specs=[column_spec]) model = mock.Mock() model.configure_mock(tables_model_metadata=model_metadata, name="my_model") client = self.tables_client({"get_model.return_value": model}, {}) client.predict(["1"], model_name="my_model") - payload = data_items_pb2.ExamplePayload( - row=data_items_pb2.Row(values=[struct_pb2.Value(string_value="1")]) + + # append each row value separately until issue is resovled + # https://github.com/googleapis/proto-plus-python/issues/104 + row = data_items.Row() + row.values.append(struct.Value(string_value="1")) + payload = data_items.ExamplePayload(row=row) + + client.prediction_client.predict.assert_called_with( + request=automl_v1beta1.PredictRequest( + name="my_model", payload=payload, params=None + ) ) - client.prediction_client.predict.assert_called_with("my_model", payload, None) def test_predict_from_dict(self): - data_type = mock.Mock(type_code=data_types_pb2.CATEGORY) + data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY) column_spec_a = mock.Mock(display_name="a", data_type=data_type) column_spec_b = mock.Mock(display_name="b", data_type=data_type) model_metadata = mock.Mock( @@ -1133,18 +1339,23 @@ def test_predict_from_dict(self): model.configure_mock(tables_model_metadata=model_metadata, name="my_model") client = self.tables_client({"get_model.return_value": model}, {}) client.predict({"a": "1", "b": "2"}, model_name="my_model") - payload = data_items_pb2.ExamplePayload( - row=data_items_pb2.Row( - values=[ - struct_pb2.Value(string_value="1"), - struct_pb2.Value(string_value="2"), - ] + + # append each row value separately until issue is resovled + # https://github.com/googleapis/proto-plus-python/issues/104 + row = data_items.Row() + row.values.append(struct.Value(string_value="1")) + row.values.append(struct.Value(string_value="2")) + + payload = data_items.ExamplePayload(row=row) + + client.prediction_client.predict.assert_called_with( + request=automl_v1beta1.PredictRequest( + name="my_model", payload=payload, params=None ) ) - 
client.prediction_client.predict.assert_called_with("my_model", payload, None) def test_predict_from_dict_with_feature_importance(self): - data_type = mock.Mock(type_code=data_types_pb2.CATEGORY) + data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY) column_spec_a = mock.Mock(display_name="a", data_type=data_type) column_spec_b = mock.Mock(display_name="b", data_type=data_type) model_metadata = mock.Mock( @@ -1156,20 +1367,23 @@ def test_predict_from_dict_with_feature_importance(self): client.predict( {"a": "1", "b": "2"}, model_name="my_model", feature_importance=True ) - payload = data_items_pb2.ExamplePayload( - row=data_items_pb2.Row( - values=[ - struct_pb2.Value(string_value="1"), - struct_pb2.Value(string_value="2"), - ] - ) - ) + + # append each row value separately until issue is resovled + # https://github.com/googleapis/proto-plus-python/issues/104 + row = data_items.Row() + row.values.append(struct.Value(string_value="1")) + row.values.append(struct.Value(string_value="2")) + + payload = data_items.ExamplePayload(row=row) + client.prediction_client.predict.assert_called_with( - "my_model", payload, {"feature_importance": "true"} + request=automl_v1beta1.PredictRequest( + name="my_model", payload=payload, params={"feature_importance": "true"} + ) ) def test_predict_from_dict_missing(self): - data_type = mock.Mock(type_code=data_types_pb2.CATEGORY) + data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY) column_spec_a = mock.Mock(display_name="a", data_type=data_type) column_spec_b = mock.Mock(display_name="b", data_type=data_type) model_metadata = mock.Mock( @@ -1179,33 +1393,40 @@ def test_predict_from_dict_missing(self): model.configure_mock(tables_model_metadata=model_metadata, name="my_model") client = self.tables_client({"get_model.return_value": model}, {}) client.predict({"a": "1"}, model_name="my_model") - payload = data_items_pb2.ExamplePayload( - row=data_items_pb2.Row( - values=[ - struct_pb2.Value(string_value="1"), - struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ] + + # append each row value separately until issue is resovled + # https://github.com/googleapis/proto-plus-python/issues/104 + row = data_items.Row() + row.values.append(struct.Value(string_value="1")) + row.values.append(struct.Value(null_value=struct.NullValue.NULL_VALUE)) + + payload = data_items.ExamplePayload(row=row) + + client.prediction_client.predict.assert_called_with( + request=automl_v1beta1.PredictRequest( + name="my_model", payload=payload, params=None ) ) - client.prediction_client.predict.assert_called_with("my_model", payload, None) def test_predict_all_types(self): - float_type = mock.Mock(type_code=data_types_pb2.FLOAT64) - timestamp_type = mock.Mock(type_code=data_types_pb2.TIMESTAMP) - string_type = mock.Mock(type_code=data_types_pb2.STRING) + float_type = mock.Mock(type_code=data_types.TypeCode.FLOAT64) + timestamp_type = mock.Mock(type_code=data_types.TypeCode.TIMESTAMP) + string_type = mock.Mock(type_code=data_types.TypeCode.STRING) array_type = mock.Mock( - type_code=data_types_pb2.ARRAY, - list_element_type=mock.Mock(type_code=data_types_pb2.FLOAT64), - ) - struct = data_types_pb2.StructType() - struct.fields["a"].CopyFrom( - data_types_pb2.DataType(type_code=data_types_pb2.CATEGORY) + type_code=data_types.TypeCode.ARRAY, + list_element_type=mock.Mock(type_code=data_types.TypeCode.FLOAT64), ) - struct.fields["b"].CopyFrom( - data_types_pb2.DataType(type_code=data_types_pb2.CATEGORY) + + struct_type = mock.Mock( + 
type_code=data_types.TypeCode.STRUCT, + struct_type=data_types.StructType( + fields={ + "a": data_types.DataType(type_code=data_types.TypeCode.CATEGORY), + "b": data_types.DataType(type_code=data_types.TypeCode.CATEGORY), + } + ), ) - struct_type = mock.Mock(type_code=data_types_pb2.STRUCT, struct_type=struct) - category_type = mock.Mock(type_code=data_types_pb2.CATEGORY) + category_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY) column_spec_float = mock.Mock(display_name="float", data_type=float_type) column_spec_timestamp = mock.Mock( display_name="timestamp", data_type=timestamp_type @@ -1243,30 +1464,37 @@ def test_predict_all_types(self): }, model_name="my_model", ) - struct = struct_pb2.Struct() - struct.fields["a"].CopyFrom(struct_pb2.Value(string_value="label_a")) - struct.fields["b"].CopyFrom(struct_pb2.Value(string_value="label_b")) - payload = data_items_pb2.ExamplePayload( - row=data_items_pb2.Row( - values=[ - struct_pb2.Value(number_value=1.0), - struct_pb2.Value(string_value="EST"), - struct_pb2.Value(string_value="text"), - struct_pb2.Value( - list_value=struct_pb2.ListValue( - values=[struct_pb2.Value(number_value=1.0)] - ) - ), - struct_pb2.Value(struct_value=struct), - struct_pb2.Value(string_value="a"), - struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ] + struct_pb = struct.Struct() + struct_pb.fields["a"].CopyFrom(struct.Value(string_value="label_a")) + struct_pb.fields["b"].CopyFrom(struct.Value(string_value="label_b")) + + # append each row value separately until issue is resovled + # https://github.com/googleapis/proto-plus-python/issues/104 + row = data_items.Row() + values = [ + struct.Value(number_value=1.0), + struct.Value(string_value="EST"), + struct.Value(string_value="text"), + struct.Value( + list_value=struct.ListValue(values=[struct.Value(number_value=1.0)]) + ), + struct.Value(struct_value=struct_pb), + struct.Value(string_value="a"), + struct.Value(null_value=struct.NullValue.NULL_VALUE), + ] + for v in values: + row.values.append(v) + + payload = data_items.ExamplePayload(row=row) + + client.prediction_client.predict.assert_called_with( + request=automl_v1beta1.PredictRequest( + name="my_model", payload=payload, params=None ) ) - client.prediction_client.predict.assert_called_with("my_model", payload, None) def test_predict_from_array_missing(self): - data_type = mock.Mock(type_code=data_types_pb2.CATEGORY) + data_type = mock.Mock(type_code=data_types.TypeCode.CATEGORY) column_spec = mock.Mock(display_name="a", data_type=data_type) model_metadata = mock.Mock(input_feature_column_specs=[column_spec]) model = mock.Mock() @@ -1296,10 +1524,11 @@ def test_batch_predict_pandas_dataframe(self): client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe) client.prediction_client.batch_predict.assert_called_with( - "my_model", - {"gcs_source": {"input_uris": ["gs://input"]}}, - {"gcs_destination": {"output_uri_prefix": "gs://output"}}, - {}, + request=automl_v1beta1.BatchPredictRequest( + name="my_model", + input_config={"gcs_source": {"input_uris": ["gs://input"]}}, + output_config={"gcs_destination": {"output_uri_prefix": "gs://output"}}, + ) ) def test_batch_predict_pandas_dataframe_init_gcs(self): @@ -1313,7 +1542,7 @@ def test_batch_predict_pandas_dataframe_init_gcs(self): dataframe = pandas.DataFrame({}) patch = mock.patch( - "google.cloud.automl_v1beta1.tables.tables_client.gcs_client.GcsClient", + "google.cloud.automl_v1beta1.services.tables.gcs_client.GcsClient", bucket_name="my_bucket", ) with patch as 
MockGcsClient: @@ -1331,10 +1560,13 @@ def test_batch_predict_pandas_dataframe_init_gcs(self): client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe) client.prediction_client.batch_predict.assert_called_with( - "my_model", - {"gcs_source": {"input_uris": ["gs://input"]}}, - {"gcs_destination": {"output_uri_prefix": "gs://output"}}, - {}, + request=automl_v1beta1.BatchPredictRequest( + name="my_model", + input_config={"gcs_source": {"input_uris": ["gs://input"]}}, + output_config={ + "gcs_destination": {"output_uri_prefix": "gs://output"} + }, + ) ) def test_batch_predict_gcs(self): @@ -1345,10 +1577,11 @@ def test_batch_predict_gcs(self): gcs_output_uri_prefix="gs://output", ) client.prediction_client.batch_predict.assert_called_with( - "my_model", - {"gcs_source": {"input_uris": ["gs://input"]}}, - {"gcs_destination": {"output_uri_prefix": "gs://output"}}, - {}, + request=automl_v1beta1.BatchPredictRequest( + name="my_model", + input_config={"gcs_source": {"input_uris": ["gs://input"]}}, + output_config={"gcs_destination": {"output_uri_prefix": "gs://output"}}, + ) ) def test_batch_predict_bigquery(self): @@ -1359,10 +1592,11 @@ def test_batch_predict_bigquery(self): bigquery_output_uri="bq://output", ) client.prediction_client.batch_predict.assert_called_with( - "my_model", - {"bigquery_source": {"input_uri": "bq://input"}}, - {"bigquery_destination": {"output_uri": "bq://output"}}, - {}, + request=automl_v1beta1.BatchPredictRequest( + name="my_model", + input_config={"bigquery_source": {"input_uri": "bq://input"}}, + output_config={"bigquery_destination": {"output_uri": "bq://output"}}, + ) ) def test_batch_predict_mixed(self): @@ -1373,10 +1607,11 @@ def test_batch_predict_mixed(self): bigquery_output_uri="bq://output", ) client.prediction_client.batch_predict.assert_called_with( - "my_model", - {"gcs_source": {"input_uris": ["gs://input"]}}, - {"bigquery_destination": {"output_uri": "bq://output"}}, - {}, + request=automl_v1beta1.BatchPredictRequest( + name="my_model", + input_config={"gcs_source": {"input_uris": ["gs://input"]}}, + output_config={"bigquery_destination": {"output_uri": "bq://output"}}, + ) ) def test_batch_predict_missing_input_gcs_uri(self): @@ -1441,10 +1676,10 @@ def test_batch_predict_no_model(self): def test_auto_ml_client_credentials(self): credentials_mock = mock.Mock() patch_auto_ml_client = mock.patch( - "google.cloud.automl_v1beta1.gapic.auto_ml_client.AutoMlClient" + "google.cloud.automl_v1beta1.services.tables.tables_client.AutoMlClient" ) with patch_auto_ml_client as MockAutoMlClient: - client = automl_v1beta1.TablesClient(credentials=credentials_mock) + automl_v1beta1.TablesClient(credentials=credentials_mock) _, auto_ml_client_kwargs = MockAutoMlClient.call_args assert "credentials" in auto_ml_client_kwargs assert auto_ml_client_kwargs["credentials"] == credentials_mock @@ -1452,10 +1687,10 @@ def test_auto_ml_client_credentials(self): def test_prediction_client_credentials(self): credentials_mock = mock.Mock() patch_prediction_client = mock.patch( - "google.cloud.automl_v1beta1.gapic.prediction_service_client.PredictionServiceClient" + "google.cloud.automl_v1beta1.services.tables.tables_client.PredictionServiceClient" ) with patch_prediction_client as MockPredictionClient: - client = automl_v1beta1.TablesClient(credentials=credentials_mock) + automl_v1beta1.TablesClient(credentials=credentials_mock) _, prediction_client_kwargs = MockPredictionClient.call_args assert "credentials" in prediction_client_kwargs assert 
prediction_client_kwargs["credentials"] == credentials_mock @@ -1463,10 +1698,10 @@ def test_prediction_client_credentials(self): def test_prediction_client_client_info(self): client_info_mock = mock.Mock() patch_prediction_client = mock.patch( - "google.cloud.automl_v1beta1.gapic.prediction_service_client.PredictionServiceClient" + "google.cloud.automl_v1beta1.services.tables.tables_client.PredictionServiceClient" ) with patch_prediction_client as MockPredictionClient: - client = automl_v1beta1.TablesClient(client_info=client_info_mock) + automl_v1beta1.TablesClient(client_info=client_info_mock) _, prediction_client_kwargs = MockPredictionClient.call_args assert "client_info" in prediction_client_kwargs assert prediction_client_kwargs["client_info"] == client_info_mock