From 95f859f87291023820b70364cde67c8be6a052b9 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:33:11 -0700 Subject: [PATCH 1/9] Integrate Python GAPIC Microgenerator in googleapis. This PR uses using documentai as an example. Depends on https://github.com/googleapis/gapic-generator-python/pull/402 PiperOrigin-RevId: 309824146 Source-Author: Google APIs Source-Date: Mon May 4 15:06:44 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: e0f9d9e1f9de890db765be46f45ca8490723e3eb Source-Link: https://github.com/googleapis/googleapis/commit/e0f9d9e1f9de890db765be46f45ca8490723e3eb --- .coveragerc | 16 + .flake8 | 18 + .github/ISSUE_TEMPLATE/bug_report.md | 3 +- .gitignore | 2 + .kokoro/publish-docs.sh | 2 - .kokoro/release.sh | 2 - .kokoro/samples/lint/common.cfg | 34 + .kokoro/samples/lint/continuous.cfg | 6 + .kokoro/samples/lint/periodic.cfg | 6 + .kokoro/samples/lint/presubmit.cfg | 6 + .kokoro/samples/python3.6/common.cfg | 34 + .kokoro/samples/python3.6/continuous.cfg | 7 + .kokoro/samples/python3.6/periodic.cfg | 6 + .kokoro/samples/python3.6/presubmit.cfg | 6 + .kokoro/samples/python3.7/common.cfg | 34 + .kokoro/samples/python3.7/continuous.cfg | 6 + .kokoro/samples/python3.7/periodic.cfg | 6 + .kokoro/samples/python3.7/presubmit.cfg | 6 + .kokoro/samples/python3.8/common.cfg | 34 + .kokoro/samples/python3.8/continuous.cfg | 6 + .kokoro/samples/python3.8/periodic.cfg | 6 + .kokoro/samples/python3.8/presubmit.cfg | 6 + .kokoro/test-samples.sh | 104 ++ CONTRIBUTING.rst | 15 +- MANIFEST.in | 19 + docs/conf.py | 9 +- docs/multiprocessing.rst | 7 + google/cloud/automl.py | 7 +- google/cloud/automl_v1/__init__.py | 15 +- .../cloud/automl_v1/gapic/auto_ml_client.py | 481 +++--- .../automl_v1/gapic/auto_ml_client_config.py | 48 +- google/cloud/automl_v1/gapic/enums.py | 16 +- .../gapic/prediction_service_client.py | 203 ++- .../transports/auto_ml_grpc_transport.py | 106 +- .../prediction_service_grpc_transport.py | 70 +- 
.../automl_v1/proto/annotation_payload.proto | 3 +- .../automl_v1/proto/annotation_payload_pb2.py | 31 +- .../automl_v1/proto/annotation_spec.proto | 13 +- .../automl_v1/proto/annotation_spec_pb2.py | 47 +- .../automl_v1/proto/classification.proto | 25 +- .../automl_v1/proto/classification_pb2.py | 116 +- google/cloud/automl_v1/proto/data_items.proto | 38 +- .../cloud/automl_v1/proto/data_items_pb2.py | 140 +- google/cloud/automl_v1/proto/dataset.proto | 8 +- google/cloud/automl_v1/proto/dataset_pb2.py | 60 +- google/cloud/automl_v1/proto/detection.proto | 9 +- google/cloud/automl_v1/proto/detection_pb2.py | 79 +- google/cloud/automl_v1/proto/geometry.proto | 3 +- google/cloud/automl_v1/proto/geometry_pb2.py | 37 +- google/cloud/automl_v1/proto/image.proto | 64 +- google/cloud/automl_v1/proto/image_pb2.py | 141 +- google/cloud/automl_v1/proto/io.proto | 708 ++++++-- google/cloud/automl_v1/proto/io_pb2.py | 1363 +++++++++------ google/cloud/automl_v1/proto/model.proto | 5 +- .../automl_v1/proto/model_evaluation.proto | 36 +- .../automl_v1/proto/model_evaluation_pb2.py | 69 +- google/cloud/automl_v1/proto/model_pb2.py | 61 +- google/cloud/automl_v1/proto/operations.proto | 3 +- .../cloud/automl_v1/proto/operations_pb2.py | 181 +- .../automl_v1/proto/prediction_service.proto | 306 ++-- .../automl_v1/proto/prediction_service_pb2.py | 351 ++-- .../proto/prediction_service_pb2_grpc.py | 70 +- google/cloud/automl_v1/proto/service.proto | 370 +++-- google/cloud/automl_v1/proto/service_pb2.py | 647 ++++---- .../cloud/automl_v1/proto/service_pb2_grpc.py | 16 +- google/cloud/automl_v1/proto/text.proto | 28 +- .../automl_v1/proto/text_extraction.proto | 3 +- .../automl_v1/proto/text_extraction_pb2.py | 47 +- google/cloud/automl_v1/proto/text_pb2.py | 90 +- .../cloud/automl_v1/proto/text_segment.proto | 3 +- .../cloud/automl_v1/proto/text_segment_pb2.py | 29 +- .../automl_v1/proto/text_sentiment.proto | 11 +- .../automl_v1/proto/text_sentiment_pb2.py | 54 +- 
.../cloud/automl_v1/proto/translation.proto | 11 +- .../cloud/automl_v1/proto/translation_pb2.py | 75 +- google/cloud/automl_v1beta1/__init__.py | 12 +- .../automl_v1beta1/gapic/auto_ml_client.py | 1474 +++++++++-------- .../gapic/auto_ml_client_config.py | 116 +- google/cloud/automl_v1beta1/gapic/enums.py | 38 +- .../gapic/prediction_service_client.py | 115 +- .../transports/auto_ml_grpc_transport.py | 291 ++-- .../prediction_service_grpc_transport.py | 8 +- .../proto/annotation_payload.proto | 3 +- .../proto/annotation_payload_pb2.py | 31 +- .../proto/annotation_spec.proto | 13 +- .../proto/annotation_spec_pb2.py | 47 +- .../automl_v1beta1/proto/classification.proto | 14 +- .../proto/classification_pb2.py | 135 +- .../automl_v1beta1/proto/column_spec.proto | 9 +- .../automl_v1beta1/proto/column_spec_pb2.py | 70 +- .../automl_v1beta1/proto/data_items.proto | 40 +- .../automl_v1beta1/proto/data_items_pb2.py | 203 ++- .../automl_v1beta1/proto/data_stats.proto | 3 +- .../automl_v1beta1/proto/data_stats_pb2.py | 231 ++- .../automl_v1beta1/proto/data_types.proto | 3 +- .../automl_v1beta1/proto/data_types_pb2.py | 74 +- .../cloud/automl_v1beta1/proto/dataset.proto | 9 +- .../cloud/automl_v1beta1/proto/dataset_pb2.py | 46 +- .../automl_v1beta1/proto/detection.proto | 3 +- .../automl_v1beta1/proto/detection_pb2.py | 129 +- .../cloud/automl_v1beta1/proto/geometry.proto | 3 +- .../automl_v1beta1/proto/geometry_pb2.py | 37 +- google/cloud/automl_v1beta1/proto/image.proto | 65 +- .../cloud/automl_v1beta1/proto/image_pb2.py | 159 +- google/cloud/automl_v1beta1/proto/io.proto | 3 +- google/cloud/automl_v1beta1/proto/io_pb2.py | 962 +++++------ google/cloud/automl_v1beta1/proto/model.proto | 9 +- .../proto/model_evaluation.proto | 9 +- .../proto/model_evaluation_pb2.py | 70 +- .../cloud/automl_v1beta1/proto/model_pb2.py | 46 +- .../automl_v1beta1/proto/operations.proto | 31 +- .../automl_v1beta1/proto/operations_pb2.py | 271 +-- .../proto/prediction_service.proto | 79 +- 
.../proto/prediction_service_pb2.py | 353 ++-- .../proto/prediction_service_pb2_grpc.py | 8 +- .../cloud/automl_v1beta1/proto/ranges.proto | 3 +- .../cloud/automl_v1beta1/proto/ranges_pb2.py | 23 +- .../automl_v1beta1/proto/regression.proto | 3 +- .../automl_v1beta1/proto/regression_pb2.py | 23 +- .../cloud/automl_v1beta1/proto/service.proto | 284 +++- .../cloud/automl_v1beta1/proto/service_pb2.py | 854 +++++----- .../automl_v1beta1/proto/service_pb2_grpc.py | 3 +- .../automl_v1beta1/proto/table_spec.proto | 9 +- .../automl_v1beta1/proto/table_spec_pb2.py | 58 +- .../cloud/automl_v1beta1/proto/tables.proto | 15 +- .../cloud/automl_v1beta1/proto/tables_pb2.py | 294 ++-- .../cloud/automl_v1beta1/proto/temporal.proto | 3 +- .../automl_v1beta1/proto/temporal_pb2.py | 23 +- google/cloud/automl_v1beta1/proto/text.proto | 28 +- .../proto/text_extraction.proto | 3 +- .../proto/text_extraction_pb2.py | 47 +- google/cloud/automl_v1beta1/proto/text_pb2.py | 90 +- .../automl_v1beta1/proto/text_segment.proto | 3 +- .../automl_v1beta1/proto/text_segment_pb2.py | 29 +- .../automl_v1beta1/proto/text_sentiment.proto | 3 +- .../proto/text_sentiment_pb2.py | 51 +- .../automl_v1beta1/proto/translation.proto | 8 +- .../automl_v1beta1/proto/translation_pb2.py | 87 +- google/cloud/automl_v1beta1/proto/video.proto | 3 +- .../cloud/automl_v1beta1/proto/video_pb2.py | 51 +- noxfile.py | 35 +- scripts/decrypt-secrets.sh | 33 + scripts/readme-gen/readme_gen.py | 66 + scripts/readme-gen/templates/README.tmpl.rst | 87 + scripts/readme-gen/templates/auth.tmpl.rst | 9 + .../templates/auth_api_key.tmpl.rst | 14 + .../templates/install_deps.tmpl.rst | 29 + .../templates/install_portaudio.tmpl.rst | 35 + setup.cfg | 16 + synth.metadata | 27 +- testing/.gitignore | 3 + tests/unit/gapic/v1/test_auto_ml_client_v1.py | 284 ++-- .../v1beta1/test_auto_ml_client_v1beta1.py | 910 +++++----- .../test_prediction_service_client_v1beta1.py | 11 +- 154 files changed, 8771 insertions(+), 6711 deletions(-) create 
mode 100644 .kokoro/samples/lint/common.cfg create mode 100644 .kokoro/samples/lint/continuous.cfg create mode 100644 .kokoro/samples/lint/periodic.cfg create mode 100644 .kokoro/samples/lint/presubmit.cfg create mode 100644 .kokoro/samples/python3.6/common.cfg create mode 100644 .kokoro/samples/python3.6/continuous.cfg create mode 100644 .kokoro/samples/python3.6/periodic.cfg create mode 100644 .kokoro/samples/python3.6/presubmit.cfg create mode 100644 .kokoro/samples/python3.7/common.cfg create mode 100644 .kokoro/samples/python3.7/continuous.cfg create mode 100644 .kokoro/samples/python3.7/periodic.cfg create mode 100644 .kokoro/samples/python3.7/presubmit.cfg create mode 100644 .kokoro/samples/python3.8/common.cfg create mode 100644 .kokoro/samples/python3.8/continuous.cfg create mode 100644 .kokoro/samples/python3.8/periodic.cfg create mode 100644 .kokoro/samples/python3.8/presubmit.cfg create mode 100755 .kokoro/test-samples.sh create mode 100644 docs/multiprocessing.rst create mode 100755 scripts/decrypt-secrets.sh create mode 100644 scripts/readme-gen/readme_gen.py create mode 100644 scripts/readme-gen/templates/README.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth_api_key.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_deps.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_portaudio.tmpl.rst create mode 100644 testing/.gitignore diff --git a/.coveragerc b/.coveragerc index b178b094..dd39c854 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [run] branch = True diff --git a/.flake8 b/.flake8 index 0268ecc9..ed931638 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [flake8] ignore = E203, E266, E501, W503 @@ -5,6 +21,8 @@ exclude = # Exclude generated code. **/proto/** **/gapic/** + **/services/** + **/types/** *_pb2.py # Standard linting exemptions. diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 3127a03a..4836b9eb 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -11,8 +11,7 @@ Thanks for stopping by to let us know something could be better! 
Please run down the following list and make sure you've tried the usual "quick fixes": - Search the issues already opened: https://github.com/googleapis/python-automl/issues - - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-python - - Search StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform+python + - Search StackOverflow: https://stackoverflow.com/questions/tagged/google-cloud-platform+python If you are still having issues, please be sure to include as much information as possible: diff --git a/.gitignore b/.gitignore index 3fb06e09..b87e1ed5 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ dist build eggs +.eggs parts bin var @@ -49,6 +50,7 @@ bigquery/docs/generated # Virtual environment env/ coverage.xml +sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index b157f117..7aff8a9e 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Disable buffering, so that the logs stream through. diff --git a/.kokoro/release.sh b/.kokoro/release.sh index ba265923..6f8265f3 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-#!/bin/bash - set -eo pipefail # Start the releasetool reporter diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg new file mode 100644 index 00000000..c6585101 --- /dev/null +++ b/.kokoro/samples/lint/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "lint" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/lint/continuous.cfg b/.kokoro/samples/lint/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/periodic.cfg b/.kokoro/samples/lint/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/lint/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/presubmit.cfg b/.kokoro/samples/lint/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg new file mode 100644 index 00000000..a67eebd6 --- /dev/null +++ b/.kokoro/samples/python3.6/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.6" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.6/continuous.cfg b/.kokoro/samples/python3.6/continuous.cfg new file mode 100644 index 00000000..7218af14 --- /dev/null +++ b/.kokoro/samples/python3.6/continuous.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.6/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/presubmit.cfg b/.kokoro/samples/python3.6/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.6/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg new file mode 100644 index 00000000..6fa14a09 --- /dev/null +++ b/.kokoro/samples/python3.7/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: 
"py-3.7" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.7/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.7/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.7/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg new file mode 100644 index 00000000..a74006d4 --- /dev/null +++ b/.kokoro/samples/python3.8/common.cfg @@ -0,0 +1,34 @@ +# Format: 
//devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.8" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.8/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.8/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.8/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git 
a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh new file mode 100755 index 00000000..14c39db4 --- /dev/null +++ b/.kokoro/test-samples.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-automl + +# Run periodic samples tests at latest release +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + LATEST_RELEASE=$(git describe --abbrev=0 --tags) + git checkout $LATEST_RELEASE +fi + +# Disable buffering, so that the logs stream through. 
+export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets acessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. +gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. + file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the Build Cop Bot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. 
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" \ No newline at end of file diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1e9731cb..6d6bd916 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: 2.7, - 3.5, 3.6, and 3.7 on both UNIX and Windows. + 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -214,26 +214,18 @@ We support: - `Python 3.5`_ - `Python 3.6`_ - `Python 3.7`_ +- `Python 3.8`_ .. _Python 3.5: https://docs.python.org/3.5/ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ +.. _Python 3.8: https://docs.python.org/3.8/ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-automl/blob/master/noxfile.py -We explicitly decided not to support `Python 2.5`_ due to `decreased usage`_ -and lack of continuous integration `support`_. - -.. _Python 2.5: https://docs.python.org/2.5/ -.. _decreased usage: https://caremad.io/2013/10/a-look-at-pypi-downloads/ -.. _support: https://blog.travis-ci.com/2013-11-18-upcoming-build-environment-updates/ - -We have `dropped 2.6`_ as a supported version as well since Python 2.6 is no -longer supported by the core development team. - Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. 
We also explicitly decided to support Python 3 beginning with version @@ -247,7 +239,6 @@ We also explicitly decided to support Python 3 beginning with version .. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django .. _projects: http://flask.pocoo.org/docs/0.10/python3/ .. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/ -.. _dropped 2.6: https://github.com/googleapis/google-cloud-python/issues/995 ********** Versioning diff --git a/MANIFEST.in b/MANIFEST.in index cd011be2..e9e29d12 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,25 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! include README.rst LICENSE recursive-include google *.json *.proto recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ + +# Exclude scripts for samples readmegen +prune scripts/readme-gen \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 0b6aebeb..a2e8ab1e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,21 +38,18 @@ "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", + "recommonmark", ] # autodoc/autosummary flags autoclass_content = "both" -autodoc_default_flags = ["members"] +autodoc_default_options = {"members": True} autosummary_generate = True # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"] -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] @@ -340,7 +337,7 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.io/grpc/python/", None), } diff --git a/docs/multiprocessing.rst b/docs/multiprocessing.rst new file mode 100644 index 00000000..1cb29d4c --- /dev/null +++ b/docs/multiprocessing.rst @@ -0,0 +1,7 @@ +.. note:: + + Because this client uses :mod:`grpcio` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.Pool` or + :class:`multiprocessing.Process`. 
diff --git a/google/cloud/automl.py b/google/cloud/automl.py index c1dc4ee7..9dc44cde 100644 --- a/google/cloud/automl.py +++ b/google/cloud/automl.py @@ -23,4 +23,9 @@ from google.cloud.automl_v1 import types -__all__ = ("enums", "types", "AutoMlClient", "PredictionServiceClient") +__all__ = ( + "enums", + "types", + "PredictionServiceClient", + "AutoMlClient", +) diff --git a/google/cloud/automl_v1/__init__.py b/google/cloud/automl_v1/__init__.py index a663f191..361bad1e 100644 --- a/google/cloud/automl_v1/__init__.py +++ b/google/cloud/automl_v1/__init__.py @@ -34,14 +34,19 @@ warnings.warn(message, DeprecationWarning) -class AutoMlClient(auto_ml_client.AutoMlClient): - __doc__ = auto_ml_client.AutoMlClient.__doc__ +class PredictionServiceClient(prediction_service_client.PredictionServiceClient): + __doc__ = prediction_service_client.PredictionServiceClient.__doc__ enums = enums -class PredictionServiceClient(prediction_service_client.PredictionServiceClient): - __doc__ = prediction_service_client.PredictionServiceClient.__doc__ +class AutoMlClient(auto_ml_client.AutoMlClient): + __doc__ = auto_ml_client.AutoMlClient.__doc__ enums = enums -__all__ = ("enums", "types", "AutoMlClient", "PredictionServiceClient") +__all__ = ( + "enums", + "types", + "PredictionServiceClient", + "AutoMlClient", +) diff --git a/google/cloud/automl_v1/gapic/auto_ml_client.py b/google/cloud/automl_v1/gapic/auto_ml_client.py index c1f6ed3d..6640df12 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client.py @@ -38,12 +38,15 @@ from google.cloud.automl_v1.gapic import enums from google.cloud.automl_v1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1.proto import annotation_spec_pb2 +from google.cloud.automl_v1.proto import data_items_pb2 from google.cloud.automl_v1.proto import dataset_pb2 from google.cloud.automl_v1.proto import image_pb2 from google.cloud.automl_v1.proto import io_pb2 from 
google.cloud.automl_v1.proto import model_evaluation_pb2 from google.cloud.automl_v1.proto import model_pb2 from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 +from google.cloud.automl_v1.proto import prediction_service_pb2 +from google.cloud.automl_v1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1.proto import service_pb2 from google.cloud.automl_v1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 @@ -51,7 +54,7 @@ from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version class AutoMlClient(object): @@ -69,7 +72,7 @@ class AutoMlClient(object): Currently the only supported ``location_id`` is "us-central1". On any input that is documented to expect a string parameter in - snake\_case or kebab-case, either of those cases is accepted. + snake_case or kebab-case, either of those cases is accepted. """ SERVICE_ADDRESS = "automl.googleapis.com:443" @@ -237,12 +240,12 @@ def __init__( self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -253,7 +256,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. 
@@ -263,28 +266,26 @@ def __init__( self._inner_api_calls = {} # Service calls - def create_dataset( + def delete_dataset( self, - parent, - dataset, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a dataset. + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Example: >>> from google.cloud import automl_v1 >>> >>> client = automl_v1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> response = client.create_dataset(parent, dataset) + >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result. @@ -296,11 +297,93 @@ def create_dataset( >>> metadata = response.metadata() Args: - parent (str): The resource name of the project to create the dataset for. - dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): The dataset to create. + name (str): Required. The resource name of the dataset to delete. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Dataset` + Returns: + A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. 
+ + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_dataset" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_dataset" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_dataset, + default_retry=self._method_configs["DeleteDataset"].retry, + default_timeout=self._method_configs["DeleteDataset"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.DeleteDatasetRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["delete_dataset"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) + + def delete_model( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. + + Example: + >>> from google.cloud import automl_v1 + >>> + >>> client = automl_v1.AutoMlClient() + >>> + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> + >>> response = client.delete_model(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... 
result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + name (str): Required. Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -321,22 +404,22 @@ def create_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "create_dataset" not in self._inner_api_calls: + if "delete_model" not in self._inner_api_calls: self._inner_api_calls[ - "create_dataset" + "delete_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_dataset, - default_retry=self._method_configs["CreateDataset"].retry, - default_timeout=self._method_configs["CreateDataset"].timeout, + self.transport.delete_model, + default_retry=self._method_configs["DeleteModel"].retry, + default_timeout=self._method_configs["DeleteModel"].timeout, client_info=self._client_info, ) - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) + request = service_pb2.DeleteModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -345,49 +428,54 @@ def create_dataset( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["create_dataset"]( + operation = self._inner_api_calls["delete_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, - dataset_pb2.Dataset, + empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) - def update_dataset( + def create_dataset( self, + parent, dataset, - update_mask, retry=google.api_core.gapic_v1.method.DEFAULT, 
timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a dataset. + Creates a dataset. Example: >>> from google.cloud import automl_v1 >>> >>> client = automl_v1.AutoMlClient() >>> + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') + >>> >>> # TODO: Initialize `dataset`: >>> dataset = {} >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} + >>> response = client.create_dataset(parent, dataset) >>> - >>> response = client.update_dataset(dataset, update_mask) + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): The dataset which replaces the resource on the server. + parent (str): Required. The resource name of the project to create the dataset for. + dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): Required. The dataset to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.Dataset` - update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -398,7 +486,7 @@ def update_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -408,24 +496,22 @@ def update_dataset( ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. - if "update_dataset" not in self._inner_api_calls: + if "create_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "update_dataset" + "create_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, + self.transport.create_dataset, + default_retry=self._method_configs["CreateDataset"].retry, + default_timeout=self._method_configs["CreateDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask - ) + request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("dataset.name", dataset.name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -434,9 +520,15 @@ def update_dataset( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_dataset"]( + operation = self._inner_api_calls["create_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + dataset_pb2.Dataset, + metadata_type=proto_operations_pb2.OperationMetadata, + ) def get_dataset( self, @@ -458,7 +550,7 @@ def get_dataset( >>> response = client.get_dataset(name) Args: - name (str): The resource name of the dataset to retrieve. + name (str): Required. The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
@@ -489,7 +581,7 @@ def get_dataset( client_info=self._client_info, ) - request = service_pb2.GetDatasetRequest(name=name) + request = service_pb2.GetDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -541,15 +633,15 @@ def list_datasets( ... pass Args: - parent (str): The resource name of the project from which to list datasets. + parent (str): Required. The resource name of the project from which to list datasets. filter_ (str): An expression for filtering the results of the request. - ``dataset_metadata`` - for existence of the case (e.g. - image\_classification\_dataset\_metadata:\*). Some examples of using - the filter are: + image_classification_dataset_metadata:*). Some examples of using the + filter are: - ``translation_dataset_metadata:*`` --> The dataset has - translation\_dataset\_metadata. + translation_dataset_metadata. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -589,7 +681,7 @@ def list_datasets( ) request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -619,38 +711,39 @@ def list_datasets( ) return iterator - def delete_dataset( + def update_dataset( self, - name, + dataset, + update_mask, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. + Updates a dataset. 
Example: >>> from google.cloud import automl_v1 >>> >>> client = automl_v1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.delete_dataset(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() + >>> # TODO: Initialize `dataset`: + >>> dataset = {} >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_dataset(dataset, update_mask) Args: - name (str): The resource name of the dataset to delete. + dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): Required. The dataset which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.Dataset` + update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -661,7 +754,7 @@ def delete_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -671,22 +764,24 @@ def delete_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_dataset" not in self._inner_api_calls: + if "update_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "delete_dataset" + "update_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_dataset, - default_retry=self._method_configs["DeleteDataset"].retry, - default_timeout=self._method_configs["DeleteDataset"].timeout, + self.transport.update_dataset, + default_retry=self._method_configs["UpdateDataset"].retry, + default_timeout=self._method_configs["UpdateDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteDatasetRequest(name=name) + request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("dataset.name", dataset.name)] except AttributeError: pass else: @@ -695,15 +790,9 @@ def delete_dataset( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_dataset"]( + return self._inner_api_calls["update_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) def import_data( self, @@ -714,7 +803,14 @@ def import_data( metadata=None, ): """ - Imports data into a dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. 
Example: >>> from google.cloud import automl_v1 @@ -775,7 +871,7 @@ def import_data( client_info=self._client_info, ) - request = service_pb2.ImportDataRequest(name=name, input_config=input_config) + request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -808,8 +904,8 @@ def export_data( metadata=None, ): """ - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1 @@ -868,7 +964,7 @@ def export_data( client_info=self._client_info, ) - request = service_pb2.ExportDataRequest(name=name, output_config=output_config) + request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -912,7 +1008,7 @@ def get_annotation_spec( >>> response = client.get_annotation_spec(name) Args: - name (str): The resource name of the annotation spec to retrieve. + name (str): Required. The resource name of the annotation spec to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -943,7 +1039,7 @@ def get_annotation_spec( client_info=self._client_info, ) - request = service_pb2.GetAnnotationSpecRequest(name=name) + request = service_pb2.GetAnnotationSpecRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -997,8 +1093,8 @@ def create_model( >>> metadata = response.metadata() Args: - parent (str): Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1.types.Model]): The model to create. + parent (str): Required. 
Resource name of the parent project where the model is being created. + model (Union[dict, ~google.cloud.automl_v1.types.Model]): Required. The model to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.Model` @@ -1032,7 +1128,7 @@ def create_model( client_info=self._client_info, ) - request = service_pb2.CreateModelRequest(parent=parent, model=model) + request = service_pb2.CreateModelRequest(parent=parent, model=model,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1076,7 +1172,7 @@ def get_model( >>> response = client.get_model(name) Args: - name (str): Resource name of the model. + name (str): Required. Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1107,7 +1203,7 @@ def get_model( client_info=self._client_info, ) - request = service_pb2.GetModelRequest(name=name) + request = service_pb2.GetModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1125,87 +1221,6 @@ def get_model( request, retry=retry, timeout=timeout, metadata=metadata ) - def update_model( - self, - model, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a model. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_model(model, update_mask) - - Args: - model (Union[dict, ~google.cloud.automl_v1.types.Model]): The model which replaces the resource on the server. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Model` - update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Model` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_model" not in self._inner_api_calls: - self._inner_api_calls[ - "update_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_model, - default_retry=self._method_configs["UpdateModel"].retry, - default_timeout=self._method_configs["UpdateModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateModelRequest(model=model, update_mask=update_mask) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("model.name", model.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def list_models( self, parent, @@ -1240,16 +1255,16 @@ def list_models( ... pass Args: - parent (str): Resource name of the project, from which to list the models. + parent (str): Required. Resource name of the project, from which to list the models. filter_ (str): An expression for filtering the results of the request. - ``model_metadata`` - for existence of the case (e.g. - image\_classification\_model\_metadata:\*). + video_classification_model_metadata:*). - ``dataset_id`` - for = or !=. Some examples of using the filter are: - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. + image_classification_model_metadata. - ``dataset_id=5`` --> The model was created from a dataset with ID 5. 
page_size (int): The maximum number of resources contained in the @@ -1291,7 +1306,7 @@ def list_models( ) request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1321,38 +1336,39 @@ def list_models( ) return iterator - def delete_model( + def update_model( self, - name, + model, + update_mask, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a model. Example: >>> from google.cloud import automl_v1 >>> >>> client = automl_v1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() + >>> # TODO: Initialize `model`: + >>> model = {} >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_model(model, update_mask) Args: - name (str): Resource name of the model being deleted. + model (Union[dict, ~google.cloud.automl_v1.types.Model]): Required. The model which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.Model` + update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will be retried using a default configuration. @@ -1363,7 +1379,7 @@ def delete_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1373,22 +1389,22 @@ def delete_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "delete_model" not in self._inner_api_calls: + if "update_model" not in self._inner_api_calls: self._inner_api_calls[ - "delete_model" + "update_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, + self.transport.update_model, + default_retry=self._method_configs["UpdateModel"].retry, + default_timeout=self._method_configs["UpdateModel"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteModelRequest(name=name) + request = service_pb2.UpdateModelRequest(model=model, update_mask=update_mask,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("model.name", model.name)] except AttributeError: pass else: @@ -1397,15 +1413,9 @@ def delete_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_model"]( + return self._inner_api_calls["update_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) def deploy_model( self, @@ -1417,15 +1427,16 @@ def deploy_model( metadata=None, ): """ - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. 
Deploying with different parametrs (as - e.g. changing + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing ``node_number``) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. Returns an empty response in the ``response`` field when it completes. @@ -1448,7 +1459,7 @@ def deploy_model( >>> metadata = response.metadata() Args: - name (str): Resource name of the model to deploy. + name (str): Required. Resource name of the model to deploy. image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. If a dict is provided, it must be of the same form as the protobuf @@ -1533,8 +1544,8 @@ def undeploy_model( Undeploys a model. If the model is not deployed this method has no effect. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. Returns an empty response in the ``response`` field when it completes. @@ -1557,7 +1568,7 @@ def undeploy_model( >>> metadata = response.metadata() Args: - name (str): Resource name of the model to undeploy. + name (str): Required. Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
@@ -1588,7 +1599,7 @@ def undeploy_model( client_info=self._client_info, ) - request = service_pb2.UndeployModelRequest(name=name) + request = service_pb2.UndeployModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1621,9 +1632,9 @@ def export_model( metadata=None, ): """ - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in ``ModelExportOutputConfig``. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in ``ModelExportOutputConfig``. Returns an empty response in the ``response`` field when it completes. @@ -1684,7 +1695,9 @@ def export_model( client_info=self._client_info, ) - request = service_pb2.ExportModelRequest(name=name, output_config=output_config) + request = service_pb2.ExportModelRequest( + name=name, output_config=output_config, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1728,7 +1741,7 @@ def get_model_evaluation( >>> response = client.get_model_evaluation(name) Args: - name (str): Resource name for the model evaluation. + name (str): Required. Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1759,7 +1772,7 @@ def get_model_evaluation( client_info=self._client_info, ) - request = service_pb2.GetModelEvaluationRequest(name=name) + request = service_pb2.GetModelEvaluationRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1814,10 +1827,10 @@ def list_model_evaluations( ... pass Args: - parent (str): Resource name of the model to list the model evaluations for. + parent (str): Required. 
Resource name of the model to list the model evaluations for. If modelId is set as "-", this will list model evaluations from across all models of the parent location. - filter_ (str): An expression for filtering the results of the request. + filter_ (str): Required. An expression for filtering the results of the request. - ``annotation_spec_id`` - for =, != or existence. See example below for the last. @@ -1867,7 +1880,7 @@ def list_model_evaluations( ) request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] diff --git a/google/cloud/automl_v1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1/gapic/auto_ml_client_config.py index e54353d4..10a58643 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client_config.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client_config.py @@ -17,18 +17,23 @@ } }, "methods": { - "CreateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", + "DeleteDataset": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", "retry_params_name": "default", }, - "UpdateDataset": { - "timeout_millis": 5000, + "DeleteModel": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "CreateDataset": { + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetDataset": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, @@ -37,9 +42,9 @@ "retry_codes_name": "idempotent", "retry_params_name": "default", }, - "DeleteDataset": { - "timeout_millis": 5000, - "retry_codes_name": "idempotent", + "UpdateDataset": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "ImportData": { @@ -48,12 +53,12 @@ "retry_params_name": "default", }, "ExportData": { - "timeout_millis": 
5000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetAnnotationSpec": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, @@ -63,42 +68,37 @@ "retry_params_name": "default", }, "GetModel": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, - "UpdateModel": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, "ListModels": { "timeout_millis": 50000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, - "DeleteModel": { - "timeout_millis": 5000, - "retry_codes_name": "idempotent", + "UpdateModel": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "DeployModel": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "UndeployModel": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "ExportModel": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetModelEvaluation": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, diff --git a/google/cloud/automl_v1/gapic/enums.py b/google/cloud/automl_v1/gapic/enums.py index 8e525587..7bb5f2cb 100644 --- a/google/cloud/automl_v1/gapic/enums.py +++ b/google/cloud/automl_v1/gapic/enums.py @@ -45,12 +45,12 @@ class TextSegmentType(enum.IntEnum): TOKEN (int): The text segment is a token. e.g. word. PARAGRAPH (int): The text segment is a paragraph. FORM_FIELD (int): The text segment is a form field. - FORM_FIELD_NAME (int): The text segment is the name part of a form field. 
It will be treated as - child of another FORM\_FIELD TextSegment if its span is subspan of - another TextSegment with type FORM\_FIELD. - FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will be - treated as child of another FORM\_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM\_FIELD. + FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be + treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. + FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will + be treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. TABLE (int): The text segment is a whole table, including headers, and all rows. TABLE_HEADER (int): The text segment is a table's headers. It will be treated as child of another TABLE TextSegment if its span is subspan of another TextSegment @@ -59,8 +59,8 @@ class TextSegmentType(enum.IntEnum): another TABLE TextSegment if its span is subspan of another TextSegment with type TABLE. TABLE_CELL (int): The text segment is a cell in table. It will be treated as child of - another TABLE\_ROW TextSegment if its span is subspan of another - TextSegment with type TABLE\_ROW. + another TABLE_ROW TextSegment if its span is subspan of another + TextSegment with type TABLE_ROW. 
""" TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 diff --git a/google/cloud/automl_v1/gapic/prediction_service_client.py b/google/cloud/automl_v1/gapic/prediction_service_client.py index d6df5e54..06686df3 100644 --- a/google/cloud/automl_v1/gapic/prediction_service_client.py +++ b/google/cloud/automl_v1/gapic/prediction_service_client.py @@ -34,24 +34,15 @@ from google.cloud.automl_v1.gapic import enums from google.cloud.automl_v1.gapic import prediction_service_client_config from google.cloud.automl_v1.gapic.transports import prediction_service_grpc_transport -from google.cloud.automl_v1.proto import annotation_spec_pb2 from google.cloud.automl_v1.proto import data_items_pb2 -from google.cloud.automl_v1.proto import dataset_pb2 -from google.cloud.automl_v1.proto import image_pb2 from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import model_evaluation_pb2 -from google.cloud.automl_v1.proto import model_pb2 from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1.proto import prediction_service_pb2 from google.cloud.automl_v1.proto import prediction_service_pb2_grpc -from google.cloud.automl_v1.proto import service_pb2 -from google.cloud.automl_v1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version class PredictionServiceClient(object): @@ -59,7 +50,7 @@ class PredictionServiceClient(object): AutoML Prediction API. On any input that is documented to expect a string parameter in - snake\_case or kebab-case, either of those cases is accepted. + snake_case or kebab-case, either of those cases is accepted. 
""" SERVICE_ADDRESS = "automl.googleapis.com:443" @@ -186,12 +177,12 @@ def __init__( self.transport = transport else: self.transport = prediction_service_grpc_transport.PredictionServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -202,7 +193,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -222,22 +213,41 @@ def predict( metadata=None, ): """ - Perform an online prediction. The prediction result will be directly - returned in the response. Available for following ML problems, and their - expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 characters, - UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 - NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, UTF-8 - encoded. - - Text Sentiment - TextSnippet, content up 500 characters, UTF-8 - encoded. + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, and + their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. 
+ + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, up to + 5MB. Not available for FORECASTING ``prediction_type``. Example: >>> from google.cloud import automl_v1 @@ -252,27 +262,37 @@ def predict( >>> response = client.predict(name, payload) Args: - name (str): Name of the model requested to serve the prediction. + name (str): Required. Name of the model requested to serve the prediction. payload (Union[dict, ~google.cloud.automl_v1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.ExamplePayload` - params (dict[str -> str]): Additional domain-specific parameters, any string must be up to 25000 - characters long. + params (dict[str -> str]): Additional domain-specific parameters, any string must be up to + 25000 characters long. + + AutoML Vision Classification - - For Image Classification: + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for an image, it will only produce results that have + at least this confidence score. The default is 0.5. 
- ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model - makes predictions for an image, it will only produce results that - have at least this confidence score. The default is 0.5. + AutoML Vision Object Detection - - For Image Object Detection: ``score_threshold`` - (float) When Model - detects objects on the image, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to 1 range, - default is 0.5. ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned in the response. - Default is 100, the requested value may be limited by server. + ``score_threshold`` : (float) When Model detects objects on the image, + it will only produce bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of bounding + boxes returned. The default is 100. The number of returned bounding + boxes might be limited by the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + + ``feature_importance`` is populated in the returned list of + ``TablesAnnotation`` objects. The default is false. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -304,7 +324,7 @@ def predict( ) request = prediction_service_pb2.PredictRequest( - name=name, payload=payload, params=params + name=name, payload=payload, params=params, ) if metadata is None: metadata = [] @@ -339,11 +359,16 @@ def batch_predict( Instead, a long running operation object is returned. User can poll the operation result via ``GetOperation`` method. Once the operation is done, ``BatchPredictResult`` is returned in the ``response`` field. 
- Available for following ML problems: + Available for following ML scenarios: - - Image Classification - - Image Object Detection - - Text Extraction + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural Language + Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables Example: >>> from google.cloud import automl_v1 @@ -370,7 +395,7 @@ def batch_predict( >>> metadata = response.metadata() Args: - name (str): Name of the model requested to serve the batch prediction. + name (str): Required. Name of the model requested to serve the batch prediction. input_config (Union[dict, ~google.cloud.automl_v1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction. If a dict is provided, it must be of the same form as the protobuf @@ -380,29 +405,75 @@ def batch_predict( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.BatchPredictOutputConfig` - params (dict[str -> str]): Additional domain-specific parameters for the predictions, any string - must be up to 25000 characters long. + params (dict[str -> str]): Additional domain-specific parameters for the predictions, any + string must be up to 25000 characters long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for a text snippet, it will only produce results that + have at least this confidence score. The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for an image, it will only produce results that have + at least this confidence score. The default is 0.5. 
+ + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects on the image, + it will only produce bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of bounding + boxes returned per image. The default is 100, the number of bounding + boxes returned might be limited by the server. AutoML Video Intelligence + Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for a video, it will only produce results that have at + least this confidence score. The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to request + segment-level classification. AutoML Video Intelligence returns labels + and their confidence scores for the entire segment of the video that + user specified in the request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to request shot-level + classification. AutoML Video Intelligence determines the boundaries for + each camera shot in the entire segment of the video that user specified + in the request configuration. AutoML Video Intelligence then returns + labels and their confidence scores for each detected shot, along with + the start and end time of the shot. The default is false. + + WARNING: Model evaluation is not done for this classification type, the + quality of it depends on training data, but there are no metrics + provided to describe that quality. - - For Text Classification: + ``1s_interval_classification`` : (boolean) Set to true to request + classification for a video at one-second intervals. AutoML Video + Intelligence returns labels and their confidence scores for each second + of the entire segment of the video that user specified in the request + configuration. The default is false. - ``score_threshold`` - (float) A value from 0.0 to 1.0. 
When the model - makes predictions for a text snippet, it will only produce results - that have at least this confidence score. The default is 0.5. + WARNING: Model evaluation is not done for this classification type, the + quality of it depends on training data, but there are no metrics + provided to describe that quality. - - For Image Classification: + AutoML Video Intelligence Object Tracking - ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model - makes predictions for an image, it will only produce results that - have at least this confidence score. The default is 0.5. + ``score_threshold`` : (float) When Model detects objects on video + frames, it will only produce bounding boxes which have at least this + confidence score. Value in 0 to 1 range, default is 0.5. - - For Image Object Detection: + ``max_bounding_box_count`` : (int64) The maximum number of bounding + boxes returned per image. The default is 100, the number of bounding + boxes returned might be limited by the server. - ``score_threshold`` - (float) When Model detects objects on the - image, it will only produce bounding boxes which have at least this - confidence score. Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than this number of - bounding boxes will be produced per image. Default is 100, the - requested value may be limited by server. + ``min_bounding_box_size`` : (float) Only bounding boxes with shortest + edge at least that long as a relative value of video frame size are + returned. Value in 0 to 1 range. Default is 0. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
diff --git a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py index c5f6bfa7..2f8d0531 100644 --- a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py +++ b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -72,7 +72,9 @@ def __init__( # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. - self._stubs = {"auto_ml_stub": service_pb2_grpc.AutoMlStub(channel)} + self._stubs = { + "auto_ml_stub": service_pb2_grpc.AutoMlStub(channel), + } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), @@ -114,30 +116,47 @@ def channel(self): return self._channel @property - def create_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. + def delete_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - Creates a dataset. + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].CreateDataset + return self._stubs["auto_ml_stub"].DeleteDataset @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. + def delete_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. - Updates a dataset. + Deletes a model. 
Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateDataset + return self._stubs["auto_ml_stub"].DeleteModel + + @property + def create_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. + + Creates a dataset. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["auto_ml_stub"].CreateDataset @property def get_dataset(self): @@ -166,25 +185,30 @@ def list_datasets(self): return self._stubs["auto_ml_stub"].ListDatasets @property - def delete_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. + def update_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. + Updates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeleteDataset + return self._stubs["auto_ml_stub"].UpdateDataset @property def import_data(self): """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - Imports data into a dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. 
Returns: Callable: A callable which accepts the appropriate @@ -197,8 +221,8 @@ def import_data(self): def export_data(self): """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate @@ -249,19 +273,6 @@ def get_model(self): """ return self._stubs["auto_ml_stub"].GetModel - @property - def update_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_model`. - - Updates a model. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].UpdateModel - @property def list_models(self): """Return the gRPC stub for :meth:`AutoMlClient.list_models`. @@ -276,33 +287,32 @@ def list_models(self): return self._stubs["auto_ml_stub"].ListModels @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + def update_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_model`. - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a model. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeleteModel + return self._stubs["auto_ml_stub"].UpdateModel @property def deploy_model(self): """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing + Deploys a model. 
If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing ``node_number``) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. Returns an empty response in the ``response`` field when it completes. @@ -320,8 +330,8 @@ def undeploy_model(self): Undeploys a model. If the model is not deployed this method has no effect. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. Returns an empty response in the ``response`` field when it completes. @@ -336,9 +346,9 @@ def undeploy_model(self): def export_model(self): """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in ``ModelExportOutputConfig``. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in ``ModelExportOutputConfig``. Returns an empty response in the ``response`` field when it completes. 
diff --git a/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py index 9d494540..c94538be 100644 --- a/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py +++ b/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -75,7 +75,7 @@ def __init__( self._stubs = { "prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub( channel - ) + ), } # Because this API includes a method that returns a @@ -121,22 +121,41 @@ def channel(self): def predict(self): """Return the gRPC stub for :meth:`PredictionServiceClient.predict`. - Perform an online prediction. The prediction result will be directly - returned in the response. Available for following ML problems, and their - expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 characters, - UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 - NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, UTF-8 - encoded. - - Text Sentiment - TextSnippet, content up 500 characters, UTF-8 - encoded. + Perform an online prediction. The prediction result is directly + returned in the response. 
Available for following ML scenarios, and + their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, up to + 5MB. Not available for FORECASTING ``prediction_type``. Returns: Callable: A callable which accepts the appropriate @@ -154,11 +173,16 @@ def batch_predict(self): Instead, a long running operation object is returned. User can poll the operation result via ``GetOperation`` method. Once the operation is done, ``BatchPredictResult`` is returned in the ``response`` field. 
- Available for following ML problems: - - - Image Classification - - Image Object Detection - - Text Extraction + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural Language + Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables Returns: Callable: A callable which accepts the appropriate diff --git a/google/cloud/automl_v1/proto/annotation_payload.proto b/google/cloud/automl_v1/proto/annotation_payload.proto index 980c0e36..a81feaf1 100644 --- a/google/cloud/automl_v1/proto/annotation_payload.proto +++ b/google/cloud/automl_v1/proto/annotation_payload.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1/proto/annotation_payload_pb2.py index 7840971f..2455a7f5 100644 --- a/google/cloud/automl_v1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1/proto/annotation_payload_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/annotation_payload.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -38,12 +35,8 @@ name="google/cloud/automl_v1/proto/annotation_payload.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z // // +//

AutoML Video Intelligence

+// +// +//
Classification
+// +// See [Preparing your training +// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for +// more information. +// +// CSV file(s) with each line in format: +// +// ML_USE,GCS_FILE_PATH +// +// For `ML_USE`, do not use `VALIDATE`. +// +// `GCS_FILE_PATH` is the path to another .csv file that describes training +// example for a given `ML_USE`, using the following row format: +// +// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) +// +// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up +// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// +// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the +// length of the video, and the end time must be after the start time. Any +// segment of a video which has one or more labels on it, is considered a +// hard negative for all other labels. Any segment with no labels on +// it is considered to be unknown. If a whole video is unknown, then +// it should be mentioned just once with ",," in place of `LABEL, +// TIME_SEGMENT_START,TIME_SEGMENT_END`. +// +// Sample top level CSV file: +// +// TRAIN,gs://folder/train_videos.csv +// TEST,gs://folder/test_videos.csv +// UNASSIGNED,gs://folder/other_videos.csv +// +// Sample rows of a CSV file for a particular ML_USE: +// +// gs://folder/video1.avi,car,120,180.000021 +// gs://folder/video1.avi,bike,150,180.000021 +// gs://folder/vid2.avi,car,0,60.5 +// gs://folder/vid3.avi,,, +// +// +// +//
Object Tracking
+// +// See [Preparing your training +// data](/video-intelligence/automl/object-tracking/docs/prepare) for more +// information. +// +// CSV file(s) with each line in format: +// +// ML_USE,GCS_FILE_PATH +// +// For `ML_USE`, do not use `VALIDATE`. +// +// `GCS_FILE_PATH` is the path to another .csv file that describes training +// example for a given `ML_USE`, using the following row format: +// +// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX +// +// or +// +// GCS_FILE_PATH,,,,,,,,,, +// +// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up +// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// Providing `INSTANCE_ID`s can help to obtain a better model. When +// a specific labeled entity leaves the video frame, and shows up +// afterwards it is not required, albeit preferable, that the same +// `INSTANCE_ID` is given to it. +// +// `TIMESTAMP` must be within the length of the video, the +// `BOUNDING_BOX` is assumed to be drawn on the closest video's frame +// to the `TIMESTAMP`. Any mentioned by the `TIMESTAMP` frame is expected +// to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per +// frame are allowed. If a whole video is unknown, then it should be +// mentioned just once with ",,,,,,,,,," in place of `LABEL, +// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`. +// +// Sample top level CSV file: +// +// TRAIN,gs://folder/train_videos.csv +// TEST,gs://folder/test_videos.csv +// UNASSIGNED,gs://folder/other_videos.csv +// +// Seven sample rows of a CSV file for a particular ML_USE: +// +// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 +// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 +// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 +// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, +// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, +// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, +// gs://folder/video2.avi,,,,,,,,,,, +//
+//
+// +// //

AutoML Natural Language

// // @@ -223,9 +322,11 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // **JSONL files that reference documents** // // .JSONL files contain, per line, a JSON document that wraps a -// `input_config` that contains the path to a source PDF document. +// `input_config` that contains the path to a source document. // Multiple JSON documents can be separated using line breaks (\n). // +// Supported document extensions: .PDF, .TIF, .TIFF +// // For example: // // { @@ -239,19 +340,19 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // { // "document": { // "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] +// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] // } // } // } // } // -// **In-line JSONL files with PDF layout information** +// **In-line JSONL files with document layout information** // -// **Note:** You can only annotate PDF files using the UI. The format described -// below applies to annotated PDF files exported using the UI or `exportData`. +// **Note:** You can only annotate documents using the UI. The format described +// below applies to annotated documents exported using the UI or `exportData`. // -// In-line .JSONL files for PDF documents contain, per line, a JSON document -// that wraps a `document` field that provides the textual content of the PDF +// In-line .JSONL files for documents contain, per line, a JSON document +// that wraps a `document` field that provides the textual content of the // document and the layout information. // // For example: @@ -342,8 +443,9 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // 10MB or less in size. // // For the `MULTICLASS` classification type, at most one `LABEL` is allowed. +// // The `ML_USE` and `LABEL` columns are optional. -// Supported file extensions: .TXT, .PDF, .ZIP +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP // // A maximum of 100 unique labels are allowed per CSV row. 
// @@ -388,7 +490,7 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // 128kB or less in size. // // The `ML_USE` and `SENTIMENT` columns are optional. -// Supported file extensions: .TXT, .PDF, .ZIP +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP // // * `SENTIMENT` - An integer between 0 and // Dataset.text_sentiment_dataset_metadata.sentiment_max @@ -417,6 +519,54 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // // // +// +//

AutoML Tables

+// +// See [Preparing your training +// data](https://cloud.google.com/automl-tables/docs/prepare) for more +// information. +// +// You can use either +// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or +// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. +// All input is concatenated into a +// single +// +// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id] +// +// **For gcs_source:** +// +// CSV file(s), where the first row of the first file is the header, +// containing unique column names. If the first row of a subsequent +// file is the same as the header, then it is also treated as a +// header. All other rows contain values for the corresponding +// columns. +// +// Each .CSV file by itself must be 10GB or smaller, and their total +// size must be 100GB or smaller. +// +// First three sample rows of a CSV file: +//
+// "Id","First Name","Last Name","Dob","Addresses"
+//
+// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+//
+// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+// 
+// **For bigquery_source:** +// +// An URI of a BigQuery table. The user data size of the BigQuery +// table must be 100GB or smaller. +// +// An imported table must have between 2 and 1,000 columns, inclusive, +// and between 1000 and 100,000,000 rows, inclusive. There are at most 5 +// import data running in parallel. +// +//
+//
+// +// // **Input field definitions:** // // `ML_USE` @@ -435,6 +585,11 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // For each label an AnnotationSpec is created which display_name // becomes the label; AnnotationSpecs are given back in predictions. // +// `INSTANCE_ID` +// : A positive integer that identifies a specific instance of a +// labeled entity on an example. Used e.g. to track two cars on +// a video while being able to tell apart which one is which. +// // `BOUNDING_BOX` // : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`) // A rectangle parallel to the frame of the example (image, @@ -452,6 +607,23 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // leading non-decimal 0 can be omitted (i.e. 0.3 = .3). // Point 0,0 is in top left. // +// `TIME_SEGMENT_START` +// : (`TIME_OFFSET`) +// Expresses a beginning, inclusive, of a time segment +// within an example that has a time dimension +// (e.g. video). +// +// `TIME_SEGMENT_END` +// : (`TIME_OFFSET`) +// Expresses an end, exclusive, of a time segment within +// n example that has a time dimension (e.g. video). +// +// `TIME_OFFSET` +// : A number of seconds as measured from the start of an +// example (e.g. video). Fractions are allowed, up to a +// microsecond precision. "inf" is allowed, and it means the end +// of the example. +// // `TEXT_SNIPPET` // : The content of a text snippet, UTF-8 encoded, enclosed within // double quotes (""). @@ -473,15 +645,22 @@ message InputConfig { // The source of the input. oneof source { // The Google Cloud Storage location for the input content. - // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], - // `gcs_source` points to a CSV file with a structure described in - // [InputConfig][google.cloud.automl.v1.InputConfig]. + // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with + // a structure described in [InputConfig][google.cloud.automl.v1.InputConfig]. 
GcsSource gcs_source = 1; } // Additional domain-specific parameters describing the semantic of the // imported data, any string must be up to 25000 // characters long. + // + //

AutoML Tables

+ // + // `schema_inference_version` + // : (integer) This value must be supplied. + // The version of the + // algorithm to use for the initial inference of the + // column data types of the imported table. Allowed values: "1". map params = 2; } @@ -496,6 +675,82 @@ message InputConfig { // non-terminal symbols defined near the end of this comment. The formats // are: // +//

AutoML Vision

+//
Classification
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH +// +// The Google Cloud Storage location of an image of up to +// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. +// This path is treated as the ID in the batch predict output. +// +// Sample rows: +// +// gs://folder/image1.jpeg +// gs://folder/image2.gif +// gs://folder/image3.png +// +//
Object Detection
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH +// +// The Google Cloud Storage location of an image of up to +// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. +// This path is treated as the ID in the batch predict output. +// +// Sample rows: +// +// gs://folder/image1.jpeg +// gs://folder/image2.gif +// gs://folder/image3.png +//
+//
+// +//

AutoML Video Intelligence

+//
Classification
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END +// +// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in +// size and up to 3h in duration duration. +// Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// +// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the +// length of the video, and the end time must be after the start time. +// +// Sample rows: +// +// gs://folder/video1.mp4,10,40 +// gs://folder/video1.mp4,20,60 +// gs://folder/vid2.mov,0,inf +// +//
Object Tracking
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END +// +// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in +// size and up to 3h in duration duration. +// Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// +// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the +// length of the video, and the end time must be after the start time. +// +// Sample rows: +// +// gs://folder/video1.mp4,10,40 +// gs://folder/video1.mp4,20,60 +// gs://folder/vid2.mov,0,inf +//
+//
+// //

AutoML Natural Language

//
Classification
// @@ -504,13 +759,15 @@ message InputConfig { // GCS_FILE_PATH // // `GCS_FILE_PATH` is the Google Cloud Storage location of a text file. -// Supported file extensions: .TXT, .PDF +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF +// // Text files can be no larger than 10MB in size. // // Sample rows: // // gs://folder/text1.txt // gs://folder/text2.pdf +// gs://folder/text3.tif // //
Sentiment Analysis
// One or more CSV files where each line is a single column: @@ -518,13 +775,15 @@ message InputConfig { // GCS_FILE_PATH // // `GCS_FILE_PATH` is the Google Cloud Storage location of a text file. -// Supported file extensions: .TXT, .PDF +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF +// // Text files can be no larger than 128kB in size. // // Sample rows: // // gs://folder/text1.txt // gs://folder/text2.pdf +// gs://folder/text3.tif // //
Entity Extraction
// @@ -540,9 +799,10 @@ message InputConfig { // be UTF-8 NFC encoded (ASCII already is). The IDs provided should be // unique. // -// Each document JSONL file contains, per line, a proto that wraps a -// Document proto with `input_config` set. Only PDF documents are -// currently supported, and each PDF document cannot exceed 2MB in size. +// Each document JSONL file contains, per line, a proto that wraps a Document +// proto with `input_config` set. Each document cannot exceed 2MB in size. +// +// Supported document extensions: .PDF, .TIF, .TIFF // // Each JSONL file must not exceed 100MB in size, and no more than 20 // JSONL files may be passed. @@ -590,7 +850,7 @@ message InputConfig { // { // "document": { // "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] +// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] // } // } // } @@ -598,12 +858,83 @@ message InputConfig { //
//
// +//

AutoML Tables

+// +// See [Preparing your training +// data](https://cloud.google.com/automl-tables/docs/predict-batch) for more +// information. +// +// You can use either +// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source] +// or +// [bigquery_source][BatchPredictInputConfig.bigquery_source]. +// +// **For gcs_source:** +// +// CSV file(s), each by itself 10GB or smaller and total size must be +// 100GB or smaller, where first file must have a header containing +// column names. If the first row of a subsequent file is the same as +// the header, then it is also treated as a header. All other rows +// contain values for the corresponding columns. +// +// The column names must contain the model's +// +// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] +// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] +// (order doesn't matter). The columns corresponding to the model's +// input feature column specs must contain values compatible with the +// column spec's data types. Prediction on all the rows, i.e. the CSV +// lines, will be attempted. +// +// +// Sample rows from a CSV file: +//
+// "First Name","Last Name","Dob","Addresses"
+//
+// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+//
+// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+// 
+// **For bigquery_source:** +// +// The URI of a BigQuery table. The user data size of the BigQuery +// table must be 100GB or smaller. +// +// The column names must contain the model's +// +// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] +// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] +// (order doesn't matter). The columns corresponding to the model's +// input feature column specs must contain values compatible with the +// column spec's data types. Prediction on all the rows of the table +// will be attempted. +//
+//
+// // **Input field definitions:** // // `GCS_FILE_PATH` // : The path to a file on Google Cloud Storage. For example, // "gs://folder/video.avi". // +// `TIME_SEGMENT_START` +// : (`TIME_OFFSET`) +// Expresses a beginning, inclusive, of a time segment +// within an example that has a time dimension +// (e.g. video). +// +// `TIME_SEGMENT_END` +// : (`TIME_OFFSET`) +// Expresses an end, exclusive, of a time segment within +// n example that has a time dimension (e.g. video). +// +// `TIME_OFFSET` +// : A number of seconds as measured from the start of an +// example (e.g. video). Fractions are allowed, up to a +// microsecond precision. "inf" is allowed, and it means the end +// of the example. +// // **Errors:** // // If any of the provided CSV files can't be parsed or if more than certain @@ -630,82 +961,43 @@ message DocumentInputConfig { GcsSource gcs_source = 1; } -// Output configuration for ExportData. -// -// As destination the -// [gcs_destination][google.cloud.automl.v1.OutputConfig.gcs_destination] -// must be set unless specified otherwise for a domain. If gcs_destination is -// set then in the given directory a new directory is created. Its name -// will be "export_data--", -// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. -// Only ground truth annotations are exported (not approved annotations are -// not exported). -// -// The outputs correspond to how the data was imported, and may be used as -// input to import data. The output formats are represented as EBNF with literal -// commas and same non-terminal symbols definitions are these in import data's -// [InputConfig][google.cloud.automl.v1.InputConfig]: -// -// * For Image Classification: -// CSV file(s) `image_classification_1.csv`, -// `image_classification_2.csv`,...,`image_classification_N.csv`with -// each line in format: -// ML_USE,GCS_FILE_PATH,LABEL,LABEL,... -// where GCS_FILE_PATHs point at the original, source locations of the -// imported images. 
-// For MULTICLASS classification type, there can be at most one LABEL -// per example. -// -// * For Image Object Detection: -// CSV file(s) `image_object_detection_1.csv`, -// `image_object_detection_2.csv`,...,`image_object_detection_N.csv` -// with each line in format: -// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) -// where GCS_FILE_PATHs point at the original, source locations of the -// imported images. -// -// * For Text Classification: -// In the created directory CSV file(s) `text_classification_1.csv`, -// `text_classification_2.csv`, ...,`text_classification_N.csv` will be -// created where N depends on the total number of examples exported. -// Each line in the CSV is of the format: -// ML_USE,GCS_FILE_PATH,LABEL,LABEL,... -// where GCS_FILE_PATHs point at the exported .txt files containing -// the text content of the imported example. For MULTICLASS -// classification type, there will be at most one LABEL per example. -// -// * For Text Sentiment: -// In the created directory CSV file(s) `text_sentiment_1.csv`, -// `text_sentiment_2.csv`, ...,`text_sentiment_N.csv` will be -// created where N depends on the total number of examples exported. -// Each line in the CSV is of the format: -// ML_USE,GCS_FILE_PATH,SENTIMENT -// where GCS_FILE_PATHs point at the exported .txt files containing -// the text content of the imported example. -// -// * For Text Extraction: -// CSV file `text_extraction.csv`, with each line in format: -// ML_USE,GCS_FILE_PATH -// GCS_FILE_PATH leads to a .JSONL (i.e. JSON Lines) file which -// contains, per line, a proto that wraps a TextSnippet proto (in json -// representation) followed by AnnotationPayload protos (called -// annotations). If initially documents had been imported, the JSONL -// will point at the original, source locations of the imported -// documents. 
-// -// * For Translation: +// * For Translation: // CSV file `translation.csv`, with each line in format: // ML_USE,GCS_FILE_PATH // GCS_FILE_PATH leads to a .TSV file which describes examples that have // given ML_USE, using the following row format per line: // TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target // language) +// +// * For Tables: +// Output depends on whether the dataset was imported from Google Cloud +// Storage or BigQuery. +// Google Cloud Storage case: +// +// [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] +// must be set. Exported are CSV file(s) `tables_1.csv`, +// `tables_2.csv`,...,`tables_N.csv` with each having as header line +// the table's column names, and all other lines contain values for +// the header columns. +// BigQuery case: +// +// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] +// pointing to a BigQuery project must be set. In the given project a +// new dataset will be created with name +// +// `export_data__` +// where will be made +// BigQuery-dataset-name compatible (e.g. most special characters will +// become underscores), and timestamp will be in +// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that +// dataset a new table called `primary_table` will be created, and +// filled with precisely the same data as this obtained on import. message OutputConfig { // The destination of the output. oneof destination { - // Required. The Google Cloud Storage location where the output is to be - // written to. For Image Object Detection, Text Extraction in the given - // directory a new directory will be created with name: + // Required. The Google Cloud Storage location where the output is to be written to. 
+ // For Image Object Detection, Text Extraction, Video Classification and + // Tables, in the given directory a new directory will be created with name: // export_data-- where // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export // output will be written into that directory. @@ -725,6 +1017,101 @@ message OutputConfig { // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents // of it depends on the ML problem the predictions are made for. // +// * For Image Classification: +// In the created directory files `image_classification_1.jsonl`, +// `image_classification_2.jsonl`,...,`image_classification_N.jsonl` +// will be created, where N may be 1, and depends on the +// total number of the successfully predicted images and annotations. +// A single image will be listed only once with all its annotations, +// and its annotations will never be split across files. +// Each .JSONL file will contain, per line, a JSON representation of a +// proto that wraps image's "ID" : "" followed by a list of +// zero or more AnnotationPayload protos (called annotations), which +// have classification detail populated. +// If prediction for any image failed (partially or completely), then an +// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` +// files will be created (N depends on total number of failed +// predictions). These files will have a JSON representation of a proto +// that wraps the same "ID" : "" but here followed by +// exactly one +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// containing only `code` and `message`fields. +// +// * For Image Object Detection: +// In the created directory files `image_object_detection_1.jsonl`, +// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl` +// will be created, where N may be 1, and depends on the +// total number of the successfully predicted images and annotations. 
+// Each .JSONL file will contain, per line, a JSON representation of a +// proto that wraps image's "ID" : "" followed by a list of +// zero or more AnnotationPayload protos (called annotations), which +// have image_object_detection detail populated. A single image will +// be listed only once with all its annotations, and its annotations +// will never be split across files. +// If prediction for any image failed (partially or completely), then +// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` +// files will be created (N depends on total number of failed +// predictions). These files will have a JSON representation of a proto +// that wraps the same "ID" : "" but here followed by +// exactly one +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// containing only `code` and `message`fields. +// * For Video Classification: +// In the created directory a video_classification.csv file, and a .JSON +// file per each video classification requested in the input (i.e. each +// line in given CSV(s)), will be created. +// +// The format of video_classification.csv is: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS +// where: +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 +// the prediction input lines (i.e. video_classification.csv has +// precisely the same number of lines as the prediction input had.) +// JSON_FILE_NAME = Name of .JSON file in the output directory, which +// contains prediction responses for the video time segment. +// STATUS = "OK" if prediction completed successfully, or an error code +// with message otherwise. If STATUS is not "OK" then the .JSON file +// for that line may not exist or be empty. 
+// +// Each .JSON file, assuming STATUS is "OK", will contain a list of +// AnnotationPayload protos in JSON format, which are the predictions +// for the video time segment the file is assigned to in the +// video_classification.csv. All AnnotationPayload protos will have +// video_classification field set, and will be sorted by +// video_classification.type field (note that the returned types are +// governed by `classifaction_types` parameter in +// [PredictService.BatchPredictRequest.params][]). +// +// * For Video Object Tracking: +// In the created directory a video_object_tracking.csv file will be +// created, and multiple files video_object_trackinng_1.json, +// video_object_trackinng_2.json,..., video_object_trackinng_N.json, +// where N is the number of requests in the input (i.e. the number of +// lines in given CSV(s)). +// +// The format of video_object_tracking.csv is: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS +// where: +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 +// the prediction input lines (i.e. video_object_tracking.csv has +// precisely the same number of lines as the prediction input had.) +// JSON_FILE_NAME = Name of .JSON file in the output directory, which +// contains prediction responses for the video time segment. +// STATUS = "OK" if prediction completed successfully, or an error +// code with message otherwise. If STATUS is not "OK" then the .JSON +// file for that line may not exist or be empty. +// +// Each .JSON file, assuming STATUS is "OK", will contain a list of +// AnnotationPayload protos in JSON format, which are the predictions +// for each frame of the video time segment the file is assigned to in +// video_object_tracking.csv. All AnnotationPayload protos will have +// video_object_tracking field set. 
// * For Text Classification: // In the created directory files `text_classification_1.jsonl`, // `text_classification_2.jsonl`,...,`text_classification_N.jsonl` @@ -732,18 +1119,18 @@ message OutputConfig { // total number of inputs and annotations found. // // Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps input text (or pdf) file in +// proto that wraps input text file (or document) in // the text snippet (or document) proto and a list of // zero or more AnnotationPayload protos (called annotations), which -// have classification detail populated. A single text (or pdf) file -// will be listed only once with all its annotations, and its +// have classification detail populated. A single text file (or +// document) will be listed only once with all its annotations, and its // annotations will never be split across files. // -// If prediction for any text (or pdf) file failed (partially or +// If prediction for any input file (or document) failed (partially or // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., // `errors_N.jsonl` files will be created (N depends on total number of // failed predictions). These files will have a JSON representation of a -// proto that wraps input text (or pdf) file followed by exactly one +// proto that wraps input file followed by exactly one // // [`google.rpc.Status`](https: // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) @@ -756,18 +1143,18 @@ message OutputConfig { // total number of inputs and annotations found. // // Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps input text (or pdf) file in +// proto that wraps input text file (or document) in // the text snippet (or document) proto and a list of // zero or more AnnotationPayload protos (called annotations), which -// have text_sentiment detail populated. 
A single text (or pdf) file -// will be listed only once with all its annotations, and its +// have text_sentiment detail populated. A single text file (or +// document) will be listed only once with all its annotations, and its // annotations will never be split across files. // -// If prediction for any text (or pdf) file failed (partially or +// If prediction for any input file (or document) failed (partially or // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., // `errors_N.jsonl` files will be created (N depends on total number of // failed predictions). These files will have a JSON representation of a -// proto that wraps input text (or pdf) file followed by exactly one +// proto that wraps input file followed by exactly one // // [`google.rpc.Status`](https: // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) @@ -803,14 +1190,108 @@ message OutputConfig { // failed predictions). These files will have a JSON representation of a // proto that wraps either the "id" : "" (in case of inline) // or the document proto (in case of document) but here followed by -// exactly one [`google.rpc.Status`](https: +// exactly one +// +// [`google.rpc.Status`](https: // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) // containing only `code` and `message`. +// +// * For Tables: +// Output depends on whether +// +// [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] +// or +// +// [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] +// is set (either is allowed). +// Google Cloud Storage case: +// In the created directory files `tables_1.csv`, `tables_2.csv`,..., +// `tables_N.csv` will be created, where N may be 1, and depends on +// the total number of the successfully predicted rows. 
+// For all CLASSIFICATION +// +// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: +// Each .csv file will contain a header, listing all columns' +// +// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] +// given on input followed by M target column names in the format of +// +// "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>__score" where M is the number of distinct target values, +// i.e. number of distinct values in the target column of the table +// used to train the model. Subsequent lines will contain the +// respective values of successfully predicted rows, with the last, +// i.e. the target, columns having the corresponding prediction +// [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. +// For REGRESSION and FORECASTING +// +// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: +// Each .csv file will contain a header, listing all columns' +// [display_name-s][google.cloud.automl.v1p1beta.display_name] +// given on input followed by the predicted target column with name +// in the format of +// +// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" +// Subsequent lines will contain the respective values of +// successfully predicted rows, with the last, i.e. the target, +// column having the predicted target value. +// If prediction for any rows failed, then an additional +// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be +// created (N depends on total number of failed rows). 
These files +// will have analogous format as `tables_*.csv`, but always with a +// single target column having +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// represented as a JSON string, and containing only `code` and +// `message`. +// BigQuery case: +// +// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] +// pointing to a BigQuery project must be set. In the given project a +// new dataset will be created with name +// `prediction__` +// where will be made +// BigQuery-dataset-name compatible (e.g. most special characters will +// become underscores), and timestamp will be in +// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset +// two tables will be created, `predictions`, and `errors`. +// The `predictions` table's column names will be the input columns' +// +// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] +// followed by the target column with name in the format of +// +// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" +// The input feature columns will contain the respective values of +// successfully predicted rows, with the target column having an +// ARRAY of +// +// [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], +// represented as STRUCT-s, containing +// [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. 
+// The `errors` table contains rows for which the prediction has +// failed, it has analogous input columns while the target column name +// is in the format of +// +// "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", +// and as a value has +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// represented as a STRUCT, and containing only `code` and `message`. message BatchPredictOutputConfig { // The destination of the output. oneof destination { - // Required. The Google Cloud Storage location of the directory where the - // output is to be written to. + // Required. The Google Cloud Storage location of the directory where the output is to + // be written to. GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED]; } } @@ -819,9 +1300,8 @@ message BatchPredictOutputConfig { message ModelExportOutputConfig { // The destination of the output. oneof destination { - // Required. The Google Cloud Storage location where the model is to be - // written to. This location may only be set for the following model - // formats: + // Required. The Google Cloud Storage location where the model is to be written to. + // This location may only be set for the following model formats: // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml". // // Under the directory given as the destination a new one with name @@ -839,7 +1319,8 @@ message ModelExportOutputConfig { // // * For Image Classification mobile-low-latency-1, mobile-versatile-1, // mobile-high-accuracy-1: - // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js". + // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js", + // "docker". 
// // * For Image Classification mobile-core-ml-low-latency-1, // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1: @@ -855,13 +1336,24 @@ message ModelExportOutputConfig { // devices. // * tf_saved_model - A tensorflow model in SavedModel format. // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can - // be used in the browser and in Node.js using JavaScript.x` + // be used in the browser and in Node.js using JavaScript. + // * docker - Used for Docker containers. Use the params field to customize + // the container. The container is verified to work correctly on + // ubuntu 16.04 operating system. See more at + // [containers + // + // quickstart](https: + // //cloud.google.com/vision/automl/docs/containers-gcs-quickstart) // * core_ml - Used for iOS mobile devices. string model_format = 4; // Additional model-type and format specific parameters describing the // requirements for the to be exported model files, any string must be up to // 25000 characters long. + // + // * For `docker` format: + // `cpu_architecture` - (string) "x86_64" (default). + // `gpu_architecture` - (string) "none" (default), "nvidia". map params = 2; } diff --git a/google/cloud/automl_v1/proto/io_pb2.py b/google/cloud/automl_v1/proto/io_pb2.py index b0a5f000..b7941101 100644 --- a/google/cloud/automl_v1/proto/io_pb2.py +++ b/google/cloud/automl_v1/proto/io_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/io.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,23 +12,19 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1/proto/io.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z`__ + for more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH + + For ``ML_USE``, do not use ``VALIDATE``. + + ``GCS_FILE_PATH`` is the path to another .csv file that describes + training example for a given ``ML_USE``, using the following row format: + + :: + + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) + + Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to + 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. Any + segment of a video which has one or more labels on it, is considered a + hard negative for all other labels. Any segment with no labels on it is + considered to be unknown. If a whole video is unknown, then it should be + mentioned just once with “,,” in place of + ``LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END``. 
+ + Sample top level CSV file: + + :: + + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv + + Sample rows of a CSV file for a particular ML_USE: + + :: + + gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 + gs://folder/vid3.avi,,, + + + + + + See `Preparing your training + data `__ for + more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH + + For ``ML_USE``, do not use ``VALIDATE``. + + ``GCS_FILE_PATH`` is the path to another .csv file that describes + training example for a given ``ML_USE``, using the following row format: + + :: + + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX + + or + + :: + + GCS_FILE_PATH,,,,,,,,,, + + Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to + 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. Providing + ``INSTANCE_ID``\ s can help to obtain a better model. When a specific + labeled entity leaves the video frame, and shows up afterwards it is not + required, albeit preferable, that the same ``INSTANCE_ID`` is given to + it. + + ``TIMESTAMP`` must be within the length of the video, the + ``BOUNDING_BOX`` is assumed to be drawn on the closest video’s frame to + the ``TIMESTAMP``. Any mentioned by the ``TIMESTAMP`` frame is expected + to be exhaustively labeled and no more than 500 ``BOUNDING_BOX``-es per + frame are allowed. If a whole video is unknown, then it should be + mentioned just once with “,,,,,,,,,,” in place of + ``LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. 
+ + Sample top level CSV file: + + :: + + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv + + Seven sample rows of a CSV file for a particular ML_USE: + + :: + + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, @@ -749,7 +860,7 @@ :: - ML_USE,GCS_FILE_PATH + ML_USE,GCS_FILE_PATH - ``ML_USE`` - Identifies the data set that the current row (file) applies to. This value can be one of the following: @@ -773,9 +884,9 @@ :: - TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl - TEST,gs://folder/file3.jsonl + TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl + TEST,gs://folder/file3.jsonl **In-line JSONL files** @@ -787,8 +898,8 @@ be separated using line breaks (``\\n``). The supplied text must be annotated exhaustively. For example, if you - include the text "horse", but do not label it as "animal", then "horse" - is assumed to not be an "animal". + include the text “horse”, but do not label it as “animal”, then “horse” + is assumed to not be an “animal”. Any given text snippet content must have 30,000 characters or less, and also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC encoded. 
@@ -797,137 +908,138 @@ :: - { + { + "text_snippet": { + "content": "dog car cat" + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 0, "end_offset": 2} + } + }, + { + "display_name": "vehicle", + "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 6} + } + }, + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 8, "end_offset": 10} + } + } + ] + }\\n + { "text_snippet": { - "content": "dog car cat" + "content": "This dog is good." }, "annotations": [ { "display_name": "animal", "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 2} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 6} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 10} + "text_segment": {"start_offset": 5, "end_offset": 7} } } - ] - }\\n - { - "text_snippet": { - "content": "This dog is good." - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 7} - } - } - ] - } + ] + } **JSONL files that reference documents** .JSONL files contain, per line, a JSON document that wraps a - ``input_config`` that contains the path to a source PDF document. - Multiple JSON documents can be separated using line breaks - (``\\n``). + ``input_config`` that contains the path to a source document. Multiple + JSON documents can be separated using line breaks (``\\n``). 
+ + Supported document extensions: .PDF, .TIF, .TIFF For example: :: - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] - } - } - } - } + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } + } + } + } - **In-line JSONL files with PDF layout information** + **In-line JSONL files with document layout information** - **Note:** You can only annotate PDF files using the UI. The format - described below applies to annotated PDF files exported using the UI or + **Note:** You can only annotate documents using the UI. The format + described below applies to annotated documents exported using the UI or ``exportData``. - In-line .JSONL files for PDF documents contain, per line, a JSON - document that wraps a ``document`` field that provides the textual - content of the PDF document and the layout information. + In-line .JSONL files for documents contain, per line, a JSON document + that wraps a ``document`` field that provides the textual content of the + document and the layout information. 
For example: :: - { - "document": { - "document_text": { - "content": "dog car cat" - } - "layout": [ - { - "text_segment": { - "start_offset": 0, - "end_offset": 11, + { + "document": { + "document_text": { + "content": "dog car cat" + } + "layout": [ + { + "text_segment": { + "start_offset": 0, + "end_offset": 11, + }, + "page_number": 1, + "bounding_poly": { + "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, + ], }, - "page_number": 1, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ], - }, - "text_segment_type": TOKEN, - } - ], - "document_dimensions": { - "width": 8.27, - "height": 11.69, - "unit": INCH, - } - "page_count": 3, - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 3} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 7} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 11} - } - }, - ], + "text_segment_type": TOKEN, + } + ], + "document_dimensions": { + "width": 8.27, + "height": 11.69, + "unit": INCH, + } + "page_count": 3, + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 0, "end_offset": 3} + } + }, + { + "display_name": "vehicle", + "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 7} + } + }, + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 8, "end_offset": 11} + } + }, + ], @@ -941,7 +1053,7 @@ :: - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - ``ML_USE`` - Identifies the data set that the current row (file) applies to. 
This value can be one of the following: @@ -955,11 +1067,11 @@ - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a pattern. If the column content is a valid Google Cloud Storage file - path, that is, prefixed by "gs://", it is treated as a + path, that is, prefixed by “gs://”, it is treated as a ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes (""), it is treated as a ``TEXT_SNIPPET``. For + quotes ("“), it is treated as a ``TEXT_SNIPPET``. For ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, "gs://folder/content.txt" + extension and UTF-8 encoding, for example,”gs://folder/content.txt" AutoML imports the file content as a text snippet. For ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. In both cases, size of the content must be 10MB or less in size. For @@ -967,8 +1079,10 @@ in size. For the ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed. The ``ML_USE`` and ``LABEL`` columns are optional. Supported - file extensions: .TXT, .PDF, .ZIP + allowed. + + The ``ML_USE`` and ``LABEL`` columns are optional. Supported file + extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP A maximum of 100 unique labels are allowed per CSV row. @@ -976,10 +1090,10 @@ :: - TRAIN,"They have bad food and very rude",RudeService,BadFood - gs://folder/content.txt,SlowService - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,BadFood + TRAIN,"They have bad food and very rude",RudeService,BadFood + gs://folder/content.txt,SlowService + TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,BadFood @@ -993,7 +1107,7 @@ :: - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT - ``ML_USE`` - Identifies the data set that the current row (file) applies to. This value can be one of the following: @@ -1007,11 +1121,11 @@ - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a pattern. 
If the column content is a valid Google Cloud Storage file - path, that is, prefixed by "gs://", it is treated as a + path, that is, prefixed by “gs://”, it is treated as a ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes (""), it is treated as a ``TEXT_SNIPPET``. For + quotes ("“), it is treated as a ``TEXT_SNIPPET``. For ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, "gs://folder/content.txt" + extension and UTF-8 encoding, for example,”gs://folder/content.txt" AutoML imports the file content as a text snippet. For ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. In both cases, size of the content must be 128kB or less in size. For @@ -1019,32 +1133,48 @@ in size. The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported file - extensions: .TXT, .PDF, .ZIP + extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - ``SENTIMENT`` - An integer between 0 and - Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max - (inclusive). Describes the ordinal of the sentiment - higher value - means a more positive sentiment. All the values are completely - relative, i.e. neither 0 needs to mean a negative or neutral - sentiment nor sentiment\_max needs to mean a positive one - it is - just required that 0 is the least positive sentiment in the data, and - sentiment\_max is the most positive one. The SENTIMENT shouldn't be - confused with "score" or "magnitude" from the previous Natural - Language Sentiment Analysis API. All SENTIMENT values between 0 and - sentiment\_max must be represented in the imported data. On - prediction the same 0 to sentiment\_max range will be used. The - difference between neighboring sentiment values needs not to be - uniform, e.g. 1 and 2 may be similar whereas the difference between 2 - and 3 may be large. + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). 
+ Describes the ordinal of the sentiment - higher value means a more + positive sentiment. All the values are completely relative, + i.e. neither 0 needs to mean a negative or neutral sentiment nor + sentiment_max needs to mean a positive one - it is just required that + 0 is the least positive sentiment in the data, and sentiment_max is + the most positive one. The SENTIMENT shouldn’t be confused with + “score” or “magnitude” from the previous Natural Language Sentiment + Analysis API. All SENTIMENT values between 0 and sentiment_max must + be represented in the imported data. On prediction the same 0 to + sentiment_max range will be used. The difference between neighboring + sentiment values needs not to be uniform, e.g. 1 and 2 may be similar + whereas the difference between 2 and 3 may be large. Sample rows: :: - TRAIN,"@freewrytin this is way too good for your product",2 - gs://folder/content.txt,3 - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,2 + TRAIN,"@freewrytin this is way too good for your product",2 + gs://folder/content.txt,3 + TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,2 + + + + + + + + + + **For bigquery_source:** + + An URI of a BigQuery table. The user data size of the BigQuery table + must be 100GB or smaller. + + An imported table must have between 2 and 1,000 columns, inclusive, and + between 1000 and 100,000,000 rows, inclusive. There are at most 5 import + data running in parallel. @@ -1053,45 +1183,59 @@ **Input field definitions:** ``ML_USE`` - ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED") Describes how the - given example (file) should be used for model training. "UNASSIGNED" - can be used when user has no preference. + (“TRAIN” \| “VALIDATE” \| “TEST” \| “UNASSIGNED”) Describes how the + given example (file) should be used for model training. “UNASSIGNED” + can be used when user has no preference. ``GCS_FILE_PATH`` - The path to a file on Google Cloud Storage. 
For example, - "gs://folder/image1.png". + The path to a file on Google Cloud Storage. For example, + “gs://folder/image1.png”. ``LABEL`` - A display name of an object on an image, video etc., e.g. "dog". - Must be up to 32 characters long and can consist only of ASCII Latin - letters A-Z and a-z, underscores(\_), and ASCII digits 0-9. For each - label an AnnotationSpec is created which display\_name becomes the - label; AnnotationSpecs are given back in predictions. + A display name of an object on an image, video etc., e.g. “dog”. Must + be up to 32 characters long and can consist only of ASCII Latin + letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For each + label an AnnotationSpec is created which display_name becomes the + label; AnnotationSpecs are given back in predictions. + ``INSTANCE_ID`` + A positive integer that identifies a specific instance of a labeled + entity on an example. Used e.g. to track two cars on a video while + being able to tell apart which one is which. ``BOUNDING_BOX`` - (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A - rectangle parallel to the frame of the example (image, video). If 4 - vertices are given they are connected by edges in the order - provided, if 2 are given they are recognized as diagonally opposite - vertices of the rectangle. + (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A + rectangle parallel to the frame of the example (image, video). If 4 + vertices are given they are connected by edges in the order provided, + if 2 are given they are recognized as diagonally opposite vertices of + the rectangle. ``VERTEX`` - (``COORDINATE,COORDINATE``) First coordinate is horizontal (x), the - second is vertical (y). + (``COORDINATE,COORDINATE``) First coordinate is horizontal (x), the + second is vertical (y). ``COORDINATE`` - A float in 0 to 1 range, relative to total length of image or video - in given dimension. For fractions the leading non-decimal 0 can be - omitted (i.e. 0.3 = .3). 
Point 0,0 is in top left. + A float in 0 to 1 range, relative to total length of image or video + in given dimension. For fractions the leading non-decimal 0 can be + omitted (i.e. 0.3 = .3). Point 0,0 is in top left. + ``TIME_SEGMENT_START`` + (``TIME_OFFSET``) Expresses a beginning, inclusive, of a time segment + within an example that has a time dimension (e.g. video). + ``TIME_SEGMENT_END`` + (``TIME_OFFSET``) Expresses an end, exclusive, of a time segment + within n example that has a time dimension (e.g. video). + ``TIME_OFFSET`` + A number of seconds as measured from the start of an example + (e.g. video). Fractions are allowed, up to a microsecond precision. + “inf” is allowed, and it means the end of the example. ``TEXT_SNIPPET`` - The content of a text snippet, UTF-8 encoded, enclosed within double - quotes (""). + The content of a text snippet, UTF-8 encoded, enclosed within double + quotes (""). ``DOCUMENT`` - A field that provides the textual content with document and the - layout information. + A field that provides the textual content with document and the + layout information. **Errors:** - If any of the provided CSV files can't be parsed or if more than certain + If any of the provided CSV files can’t be parsed or if more than certain percent of CSV rows cannot be processed then the operation fails and nothing is imported. Regardless of overall success or failure the per-row failures, up to a certain count cap, is listed in - Operation.metadata.partial\_failures. + Operation.metadata.partial_failures. Attributes: @@ -1105,30 +1249,27 @@ params: Additional domain-specific parameters describing the semantic of the imported data, any string must be up to 25000 - characters long. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.InputConfig) - ), -) -_sym_db.RegisterMessage(InputConfig) -_sym_db.RegisterMessage(InputConfig.ParamsEntry) - -BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( - "BatchPredictInputConfig", - (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTINPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Input configuration for BatchPredict Action. + characters long. - The format of input depends on the ML problem of the model used for - prediction. As input source the - [gcs\_source][google.cloud.automl.v1.InputConfig.gcs\_source] is - expected, unless specified otherwise. - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: + + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + The Google Cloud Storage location of an image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the ID + in the batch predict output. + + Sample rows: + + :: + + gs://folder/image1.jpeg + gs://folder/image2.gif + gs://folder/image3.png @@ -1138,18 +1279,100 @@ :: - GCS_FILE_PATH + GCS_FILE_PATH + + The Google Cloud Storage location of an image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the ID + in the batch predict output. + + Sample rows: + + :: + + gs://folder/image1.jpeg + gs://folder/image2.gif + gs://folder/image3.png + + + + + + + + + + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of video up to + 50GB in size and up to 3h in duration duration. Supported extensions: + .MOV, .MPEG4, .MP4, .AVI. 
+ + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. + + Sample rows: + + :: + + gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf + + + + + + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of video up to + 50GB in size and up to 3h in duration duration. Supported extensions: + .MOV, .MPEG4, .MP4, .AVI. + + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. + + Sample rows: + + :: + + gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf + + + + + + + + + + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF Text files can be no larger than - 10MB in size. + Supported file extensions: .TXT, .PDF, .TIF, .TIFF + + Text files can be no larger than 10MB in size. Sample rows: :: - gs://folder/text1.txt - gs://folder/text2.pdf + gs://folder/text1.txt + gs://folder/text2.pdf + gs://folder/text3.tif @@ -1159,18 +1382,20 @@ :: - GCS_FILE_PATH + GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF Text files can be no larger than - 128kB in size. + Supported file extensions: .TXT, .PDF, .TIF, .TIFF + + Text files can be no larger than 128kB in size. 
Sample rows: :: - gs://folder/text1.txt - gs://folder/text2.pdf + gs://folder/text1.txt + gs://folder/text2.pdf + gs://folder/text3.tif @@ -1182,70 +1407,93 @@ Each JSONL file contains a per line a proto that wraps a temporary user-assigned TextSnippet ID (string up to 2000 characters long) called - "id", a TextSnippet proto (in JSON representation) and zero or more + “id”, a TextSnippet proto (in JSON representation) and zero or more TextFeature protos. Any given text snippet content must have 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII already is). The IDs provided should be unique. Each document JSONL file contains, per line, a proto that wraps a - Document proto with ``input_config`` set. Only PDF documents are - currently supported, and each PDF document cannot exceed 2MB in size. + Document proto with ``input_config`` set. Each document cannot exceed + 2MB in size. + + Supported document extensions: .PDF, .TIF, .TIFF Each JSONL file must not exceed 100MB in size, and no more than 20 JSONL files may be passed. Sample inline JSONL file (Shown with artificial line breaks. 
Actual line - breaks are denoted by "``\\n``".): + breaks are denoted by “``\\n``”.): :: + { + "id": "my_first_id", + "text_snippet": { "content": "dog car cat"}, + "text_features": [ + { + "text_segment": {"start_offset": 4, "end_offset": 6}, + "structural_type": PARAGRAPH, + "bounding_poly": { + "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, + ] + }, + } + ], + }\\n { - "id": "my_first_id", - "text_snippet": { "content": "dog car cat"}, - "text_features": [ - { - "text_segment": {"start_offset": 4, "end_offset": 6}, - "structural_type": PARAGRAPH, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ] - }, - } - ], - }\\n - { - "id": "2", - "text_snippet": { - "content": "Extended sample content", - "mime_type": "text/plain" - } - } + "id": "2", + "text_snippet": { + "content": "Extended sample content", + "mime_type": "text/plain" + } + } Sample document JSONL file (Shown with artificial line breaks. Actual - line breaks are denoted by "``\\n``".): + line breaks are denoted by “``\\n``”.): :: - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] - } - } - } - } + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } + } + } + } + + + + + + + + + + **For bigquery_source:** + + The URI of a BigQuery table. The user data size of the BigQuery table + must be 100GB or smaller. 
+ + The column names must contain the model’s + + [input_feature_column_specs’][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] (order + doesn’t matter). The columns corresponding to the model’s input feature + column specs must contain values compatible with the column spec’s data + types. Prediction on all the rows of the table will be attempted. @@ -1254,16 +1502,26 @@ **Input field definitions:** ``GCS_FILE_PATH`` - The path to a file on Google Cloud Storage. For example, - "gs://folder/video.avi". + The path to a file on Google Cloud Storage. For example, + “gs://folder/video.avi”. + ``TIME_SEGMENT_START`` + (``TIME_OFFSET``) Expresses a beginning, inclusive, of a time segment + within an example that has a time dimension (e.g. video). + ``TIME_SEGMENT_END`` + (``TIME_OFFSET``) Expresses an end, exclusive, of a time segment + within n example that has a time dimension (e.g. video). + ``TIME_OFFSET`` + A number of seconds as measured from the start of an example + (e.g. video). Fractions are allowed, up to a microsecond precision. + “inf” is allowed, and it means the end of the example. **Errors:** - If any of the provided CSV files can't be parsed or if more than certain + If any of the provided CSV files can’t be parsed or if more than certain percent of CSV rows cannot be processed then the operation fails and prediction does not happen. Regardless of overall success or failure the per-row failures, up to a certain count cap, will be listed in - Operation.metadata.partial\_failures. + Operation.metadata.partial_failures. Attributes: @@ -1274,17 +1532,17 @@ content. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictInputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictInputConfig) DocumentInputConfig = _reflection.GeneratedProtocolMessageType( "DocumentInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTINPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Input configuration of a + { + "DESCRIPTOR": _DOCUMENTINPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Input configuration of a [Document][google.cloud.automl.v1.Document]. @@ -1295,76 +1553,41 @@ Supported extensions: .PDF. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DocumentInputConfig) - ), + }, ) _sym_db.RegisterMessage(DocumentInputConfig) OutputConfig = _reflection.GeneratedProtocolMessageType( "OutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_OUTPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Output configuration for ExportData. - - As destination the - [gcs\_destination][google.cloud.automl.v1.OutputConfig.gcs\_destination] - must be set unless specified otherwise for a domain. If gcs\_destination - is set then in the given directory a new directory is created. Its name - will be "export\_data--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. Only ground truth annotations are exported (not - approved annotations are not exported). - - The outputs correspond to how the data was imported, and may be used as - input to import data. The output formats are represented as EBNF with - literal commas and same non-terminal symbols definitions are these in - import data's [InputConfig][google.cloud.automl.v1.InputConfig]: - - - For Image Classification: CSV file(s) ``image_classification_1.csv``, - ``image_classification_2.csv``,...,\ ``image_classification_N.csv``\ with - each line in format: ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... 
where - GCS\_FILE\_PATHs point at the original, source locations of the - imported images. For MULTICLASS classification type, there can be at - most one LABEL per example. - - - For Image Object Detection: CSV file(s) - ``image_object_detection_1.csv``, - ``image_object_detection_2.csv``,...,\ ``image_object_detection_N.csv`` - with each line in format: - ML\_USE,GCS\_FILE\_PATH,[LABEL],(BOUNDING\_BOX \| ,,,,,,,) where - GCS\_FILE\_PATHs point at the original, source locations of the - imported images. - - - For Text Classification: In the created directory CSV file(s) - ``text_classification_1.csv``, ``text_classification_2.csv``, - ...,\ ``text_classification_N.csv`` will be created where N depends - on the total number of examples exported. Each line in the CSV is of - the format: ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... where - GCS\_FILE\_PATHs point at the exported .txt files containing the text - content of the imported example. For MULTICLASS classification type, - there will be at most one LABEL per example. - - - For Text Sentiment: In the created directory CSV file(s) - ``text_sentiment_1.csv``, ``text_sentiment_2.csv``, - ...,\ ``text_sentiment_N.csv`` will be created where N depends on the - total number of examples exported. Each line in the CSV is of the - format: ML\_USE,GCS\_FILE\_PATH,SENTIMENT where GCS\_FILE\_PATHs - point at the exported .txt files containing the text content of the - imported example. - - - For Text Extraction: CSV file ``text_extraction.csv``, with each line - in format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .JSONL - (i.e. JSON Lines) file which contains, per line, a proto that wraps a - TextSnippet proto (in json representation) followed by - AnnotationPayload protos (called annotations). If initially documents - had been imported, the JSONL will point at the original, source - locations of the imported documents. 
- - - For Translation: CSV file ``translation.csv``, with each line in - format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .TSV file - which describes examples that have given ML\_USE, using the following - row format per line: TEXT\_SNIPPET (in source language) - \\tTEXT\_SNIPPET (in target language) + { + "DESCRIPTOR": _OUTPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """\* For Translation: CSV file ``translation.csv``, with + each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV + file which describes examples that have given ML_USE, using the + following row format per line: TEXT_SNIPPET (in source language) + \\tTEXT_SNIPPET (in target language) + + - For Tables: Output depends on whether the dataset was imported from + Google Cloud Storage or BigQuery. Google Cloud Storage case: + + [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] + must be set. Exported are CSV file(s) ``tables_1.csv``, + ``tables_2.csv``,…,\ ``tables_N.csv`` with each having as header line + the table’s column names, and all other lines contain values for the + header columns. BigQuery case: + + [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a new + dataset will be created with name + + ``export_data__`` + where will be made BigQuery-dataset-name compatible (e.g. most special + characters will become underscores), and timestamp will be in + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In that dataset a + new table called ``primary_table`` will be created, and filled with + precisely the same data as this obtained on import. Attributes: @@ -1373,112 +1596,292 @@ gcs_destination: Required. The Google Cloud Storage location where the output is to be written to. 
For Image Object Detection, Text - Extraction in the given directory a new directory will be - created with name: export\_data-- where timestamp is in YYYY- - MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be - written into that directory. + Extraction, Video Classification and Tables, in the given + directory a new directory will be created with name: + export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ + ISO-8601 format. All export output will be written into that + directory. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.OutputConfig) - ), + }, ) _sym_db.RegisterMessage(OutputConfig) BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictOutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTOUTPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Output configuration for BatchPredict Action. + { + "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Output configuration for BatchPredict Action. As destination the - [gcs\_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs\_destination] - must be set unless specified otherwise for a domain. If gcs\_destination + [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination] + must be set unless specified otherwise for a domain. If gcs_destination is set then in the given directory a new directory is created. Its name - will be "prediction--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ + will be “prediction--”, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the ML problem the predictions are made for. 
+ - For Image Classification: In the created directory files + ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted images and annotations. A single image + will be listed only once with all its annotations, and its + annotations will never be split across files. Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + image’s “ID” : “” followed by a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image failed + (partially or completely), then an additional ``errors_1.jsonl``, + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N + depends on total number of failed predictions). These files will have + a JSON representation of a proto that wraps the same “ID” : “” but + here followed by exactly one + + ```google.rpc.Status`` `__ + containing only ``code`` and ``message``\ fields. + + - For Image Object Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,…,\ ``image_object_detection_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted images and annotations. Each .JSONL file + will contain, per line, a JSON representation of a proto that wraps + image’s “ID” : “” followed by a list of zero or more + AnnotationPayload protos (called annotations), which have + image_object_detection detail populated. A single image will be + listed only once with all its annotations, and its annotations will + never be split across files. If prediction for any image failed + (partially or completely), then additional ``errors_1.jsonl``, + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N + depends on total number of failed predictions). 
These files will have + a JSON representation of a proto that wraps the same “ID” : “” but + here followed by exactly one + + ```google.rpc.Status`` `__ + containing only ``code`` and ``message``\ fields. \* For Video + Classification: In the created directory a video_classification.csv + file, and a .JSON file per each video classification requested in the + input (i.e. each line in given CSV(s)), will be created. + + :: + + The format of video_classification.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to + 1 the prediction input lines (i.e. video_classification.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = “OK” + if prediction completed successfully, or an error code with message + otherwise. If STATUS is not “OK” then the .JSON file for that line may + not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for the video time segment the file is assigned to in the + video_classification.csv. All AnnotationPayload protos will have + video_classification field set, and will be sorted by + video_classification.type field (note that the returned types are + governed by `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). + + - For Video Object Tracking: In the created directory a + video_object_tracking.csv file will be created, and multiple files + video_object_trackinng_1.json, video_object_trackinng_2.json,…, + video_object_trackinng_N.json, where N is the number of requests in + the input (i.e. the number of lines in given CSV(s)). 
+ + :: + + The format of video_object_tracking.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to + 1 the prediction input lines (i.e. video_object_tracking.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = “OK” + if prediction completed successfully, or an error code with message + otherwise. If STATUS is not “OK” then the .JSON file for that line may + not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for each frame of the video time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. + - For Text Classification: In the created directory files ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` + ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` will be created, where N may be 1, and depends on the total number of inputs and annotations found. :: - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text (or pdf) file in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text (or pdf) file - will be listed only once with all its annotations, and its - annotations will never be split across files. 
+ Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in + the text snippet (or document) proto and a list of + zero or more AnnotationPayload protos (called annotations), which + have classification detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. - If prediction for any text (or pdf) file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text (or pdf) file followed by exactly one + If prediction for any input file (or document) failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one ```google.rpc.Status`` `__ containing only ``code`` and ``message``. - For Text Sentiment: In the created directory files ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will be + ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be created, where N may be 1, and depends on the total number of inputs and annotations found. :: - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text (or pdf) file in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text (or pdf) file - will be listed only once with all its annotations, and its - annotations will never be split across files. 
+ Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in + the text snippet (or document) proto and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_sentiment detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. - If prediction for any text (or pdf) file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text (or pdf) file followed by exactly one + If prediction for any input file (or document) failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one ```google.rpc.Status`` `__ containing only ``code`` and ``message``. - For Text Extraction: In the created directory files ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` will be + ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be created, where N may be 1, and depends on the total number of inputs and annotations found. The contents of these .JSONL file(s) depend on whether the input used inline text, or documents. 
If input was inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet's - "id" (if specified), followed by input text snippet, and a list of + representation of a proto that wraps given in request text snippet’s + “id” (if specified), followed by input text snippet, and a list of zero or more AnnotationPayload protos (called annotations), which - have text\_extraction detail populated. A single text snippet will be + have text_extraction detail populated. A single text snippet will be listed only once with all its annotations, and its annotations will never be split across files. If input used documents, then each .JSONL file will contain, per line, a JSON representation of a proto that wraps given in request document proto, followed by its OCR-ed representation in the form of a text snippet, finally followed by a list of zero or more AnnotationPayload protos (called annotations), - which have text\_extraction detail populated and refer, via their + which have text_extraction detail populated and refer, via their indices, to the OCR-ed text snippet. A single document (and its text snippet) will be listed only once with all its annotations, and its annotations will never be split across files. If prediction for any text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` files + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N depends on total number of failed predictions). These files will have a JSON representation of a proto that wraps - either the "id" : "" (in case of inline) or the document proto (in + either the “id” : “” (in case of inline) or the document proto (in case of document) but here followed by exactly one - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. + + ```google.rpc.Status`` `__ + containing only ``code`` and ``message``. 
+ + - For Tables: Output depends on whether + + [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] + or + + [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] + is set (either is allowed). Google Cloud Storage case: In the created + directory files ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted rows. For all CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns’ + + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + given on input followed by M target column names in the format of + + "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>\_\_score" + where M is the number of distinct target values, i.e. number of distinct + values in the target column of the table used to train the model. + Subsequent lines will contain the respective values of successfully + predicted rows, with the last, i.e. the target, columns having the + corresponding prediction + [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For + REGRESSION and FORECASTING + + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns’ + [display_name-s][google.cloud.automl.v1p1beta.display_name] given on + input followed by the predicted target column with name in the format of + + "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + Subsequent lines will contain the respective values of successfully + predicted rows, with the last, i.e. 
the target, column having the + predicted target value. If prediction for any rows failed, then an + additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will + be created (N depends on total number of failed rows). These files will + have analogous format as ``tables_*.csv``, but always with a single + target column having + + ```google.rpc.Status`` `__ + represented as a JSON string, and containing only ``code`` and + ``message``. BigQuery case: + + [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a new + dataset will be created with name + ``prediction__`` where + will be made BigQuery-dataset-name compatible (e.g. most special + characters will become underscores), and timestamp will be in + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset two + tables will be created, ``predictions``, and ``errors``. The + ``predictions`` table’s column names will be the input columns’ + + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + followed by the target column with name in the format of + + "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + The input feature columns will contain the respective values of + successfully predicted rows, with the target column having an ARRAY of + + [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. 
The + ``errors`` table contains rows for which the prediction has failed, it + has analogous input columns while the target column name is in the + format of + + "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", + and as a value has + + ```google.rpc.Status`` `__ + represented as a STRUCT, and containing only ``code`` and ``message``. Attributes: @@ -1489,26 +1892,26 @@ where the output is to be written to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictOutputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictOutputConfig) ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType( "ModelExportOutputConfig", (_message.Message,), - dict( - ParamsEntry=_reflection.GeneratedProtocolMessageType( + { + "ParamsEntry": _reflection.GeneratedProtocolMessageType( "ParamsEntry", (_message.Message,), - dict( - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, - __module__="google.cloud.automl_v1.proto.io_pb2" + { + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, + "__module__": "google.cloud.automl_v1.proto.io_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig.ParamsEntry) - ), + }, ), - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Output configuration for ModelExport Action. + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Output configuration for ModelExport Action. Attributes: @@ -1517,39 +1920,47 @@ gcs_destination: Required. The Google Cloud Storage location where the model is to be written to. This location may only be set for the - following model formats: "tflite", "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js", "core\_ml". 
Under the directory - given as the destination a new one with name "model-export--", + following model formats: “tflite”, “edgetpu_tflite”, + “tf_saved_model”, “tf_js”, “core_ml”. Under the directory + given as the destination a new one with name “model-export--”, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside the model and any of its supporting files will be written. model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type (if - given problem and type combination doesn't have a format + given problem and type combination doesn’t have a format listed, it means its models are not exportable): - For Image Classification mobile-low-latency-1, mobile-versatile-1, - mobile-high-accuracy-1: "tflite" (default), "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js". - For Image Classification - mobile-core-ml-low-latency-1, mobile-core-ml-versatile-1, - mobile-core-ml-high-accuracy-1: "core\_ml" (default). - - For Image Object Detection mobile-low-latency-1, mobile- - versatile-1, mobile-high-accuracy-1: "tflite", - "tf\_saved\_model", "tf\_js". Formats description: - - tflite - Used for Android mobile devices. - edgetpu\_tflite - - Used for `Edge TPU `__ - devices. - tf\_saved\_model - A tensorflow model in - SavedModel format. - tf\_js - A `TensorFlow.js + mobile-high-accuracy-1: “tflite” (default), “edgetpu_tflite”, + “tf_saved_model”, “tf_js”, “docker”. - For Image + Classification mobile-core-ml-low-latency-1, mobile-core- + ml-versatile-1, mobile-core-ml-high-accuracy-1: “core_ml” + (default). - For Image Object Detection mobile-low- + latency-1, mobile-versatile-1, mobile-high-accuracy-1: + “tflite”, “tf_saved_model”, “tf_js”. Formats description: + - tflite - Used for Android mobile devices. - edgetpu_tflite + - Used for `Edge TPU `__ devices. - tf_saved_model - A tensorflow model in + SavedModel format. 
- tf_js - A `TensorFlow.js `__ model that can be used - in the browser and in Node.js using JavaScript.x\` - core\_ml - - Used for iOS mobile devices. + in the browser and in Node.js using JavaScript. - docker - + Used for Docker containers. Use the params field to + customize the container. The container is verified to work + correctly on ubuntu 16.04 operating system. See more at + [containers quickstart](https: + //cloud.google.com/vision/automl/docs/containers-gcs- + quickstart) \* core_ml - Used for iOS mobile devices. params: Additional model-type and format specific parameters describing the requirements for the to be exported model - files, any string must be up to 25000 characters long. + files, any string must be up to 25000 characters long. - For + ``docker`` format: ``cpu_architecture`` - (string) “x86_64” + (default). ``gpu_architecture`` - (string) “none” (default), + “nvidia”. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig) - ), + }, ) _sym_db.RegisterMessage(ModelExportOutputConfig) _sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry) @@ -1557,30 +1968,30 @@ GcsSource = _reflection.GeneratedProtocolMessageType( "GcsSource", (_message.Message,), - dict( - DESCRIPTOR=_GCSSOURCE, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""The Google Cloud Storage location for the input content. + { + "DESCRIPTOR": _GCSSOURCE, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location for the input content. Attributes: input_uris: Required. Google Cloud Storage URIs to input files, up to 2000 - characters long. Accepted forms: \* Full object path, e.g. - gs://bucket/directory/object.csv + characters long. Accepted forms: \* Full object path, + e.g. 
gs://bucket/directory/object.csv """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GcsSource) - ), + }, ) _sym_db.RegisterMessage(GcsSource) GcsDestination = _reflection.GeneratedProtocolMessageType( "GcsDestination", (_message.Message,), - dict( - DESCRIPTOR=_GCSDESTINATION, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""The Google Cloud Storage location where the output is to + { + "DESCRIPTOR": _GCSDESTINATION, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location where the output is to be written to. @@ -1590,10 +2001,10 @@ 2000 characters long. Accepted forms: \* Prefix path: gs://bucket/directory The requesting user must have write permission to the bucket. The directory is created if it - doesn't exist. + doesn’t exist. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GcsDestination) - ), + }, ) _sym_db.RegisterMessage(GcsDestination) diff --git a/google/cloud/automl_v1/proto/model.proto b/google/cloud/automl_v1/proto/model.proto index ee080684..f5368937 100644 --- a/google/cloud/automl_v1/proto/model.proto +++ b/google/cloud/automl_v1/proto/model.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -35,7 +34,7 @@ option ruby_package = "Google::Cloud::AutoML::V1"; message Model { option (google.api.resource) = { type: "automl.googleapis.com/Model" - pattern: "projects/{project_id}/locations/{location_id}/models/{model_id}" + pattern: "projects/{project}/locations/{location}/models/{model}" }; // Deployment state of the model. 
diff --git a/google/cloud/automl_v1/proto/model_evaluation.proto b/google/cloud/automl_v1/proto/model_evaluation.proto index 8c768adc..601389f7 100644 --- a/google/cloud/automl_v1/proto/model_evaluation.proto +++ b/google/cloud/automl_v1/proto/model_evaluation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,13 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1; -import "google/api/annotations.proto"; import "google/api/resource.proto"; import "google/cloud/automl/v1/classification.proto"; import "google/cloud/automl/v1/detection.proto"; @@ -25,6 +23,7 @@ import "google/cloud/automl/v1/text_extraction.proto"; import "google/cloud/automl/v1/text_sentiment.proto"; import "google/cloud/automl/v1/translation.proto"; import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; option csharp_namespace = "Google.Cloud.AutoML.V1"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl"; @@ -35,17 +34,24 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // Evaluation results of a model. message ModelEvaluation { + option (google.api.resource) = { + type: "automl.googleapis.com/ModelEvaluation" + pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}" + }; + // Output only. Problem type specific evaluation metrics. oneof metrics { - // Model evaluation metrics for image, text classification. + // Model evaluation metrics for image, text, video and tables + // classification. + // Tables problem is considered a classification when the target column + // is CATEGORY DataType. 
ClassificationEvaluationMetrics classification_evaluation_metrics = 8; // Model evaluation metrics for translation. TranslationEvaluationMetrics translation_evaluation_metrics = 9; // Model evaluation metrics for image object detection. - ImageObjectDetectionEvaluationMetrics - image_object_detection_evaluation_metrics = 12; + ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12; // Evaluation metrics for text sentiment models. TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11; @@ -60,8 +66,15 @@ message ModelEvaluation { // `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}` string name = 1; - // Output only. The ID of the annotation spec that the model evaluation - // applies to. The The ID is empty for the overall model evaluation. + // Output only. The ID of the annotation spec that the model evaluation applies to. The + // The ID is empty for the overall model evaluation. + // For Tables annotation specs in the dataset do not exist and this ID is + // always not set, but for CLASSIFICATION + // + // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] + // the + // [display_name][google.cloud.automl.v1.ModelEvaluation.display_name] + // field is used. string annotation_spec_id = 2; // Output only. The value of @@ -69,7 +82,12 @@ message ModelEvaluation { // at the moment when the model was trained. Because this field returns a // value at model training time, for different models trained from the same // dataset, the values may differ, since display names could had been changed - // between the two model's trainings. + // between the two model's trainings. For Tables CLASSIFICATION + // + // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] + // distinct values of the target column at the moment of the model evaluation + // are populated here. 
+ // The display_name is empty for the overall model evaluation. string display_name = 15; // Output only. Timestamp when this model evaluation was created. diff --git a/google/cloud/automl_v1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1/proto/model_evaluation_pb2.py index 7ac909df..4e6f2967 100644 --- a/google/cloud/automl_v1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1/proto/model_evaluation_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1/proto/model_evaluation.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,7 +12,6 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_classification__pb2, @@ -33,20 +29,16 @@ translation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_translation__pb2, ) from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1/proto/model_evaluation.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z params = 3; } -// Response message for -// [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. +// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. message PredictResponse { // Prediction result. - // Translation and Text Sentiment will return precisely one payload. 
+ // AutoML Translation and AutoML Natural Language Sentiment Analysis + // return precisely one payload. repeated AnnotationPayload payload = 1; // The preprocessed example that AutoML actually makes prediction on. // Empty if AutoML does not preprocess the input example. - // * For Text Extraction: - // If the input is a .pdf file, the OCR'ed text will be provided in - // [document_text][google.cloud.automl.v1.Document.document_text]. - // - // * For Text Classification: - // If the input is a .pdf file, the OCR'ed trucated text will be provided in - // [document_text][google.cloud.automl.v1.Document.document_text]. // - // * For Text Sentiment: - // If the input is a .pdf file, the OCR'ed trucated text will be provided in - // [document_text][google.cloud.automl.v1.Document.document_text]. + // For AutoML Natural Language (Classification, Entity Extraction, and + // Sentiment Analysis), if the input is a document, the recognized text is + // returned in the + // [document_text][google.cloud.automl.v1.Document.document_text] + // property. ExamplePayload preprocessed_input = 3; // Additional domain-specific prediction response metadata. // - // * For Image Object Detection: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // image could have been returned. - // - // * For Text Sentiment: - // `sentiment_score` - (float, deprecated) A value between -1 and 1, - // -1 maps to least positive sentiment, while 1 maps to the most positive - // one and the higher the score, the more positive the sentiment in the - // document is. Yet these values are relative to the training data, so - // e.g. if all data was positive then -1 will be also positive (though - // the least). - // The sentiment_score shouldn't be confused with "score" or "magnitude" - // from the previous Natural Language Sentiment Analysis API. 
+ // AutoML Vision Object Detection + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding boxes to return per image. + // + // AutoML Natural Language Sentiment Analysis + // + // `sentiment_score` + // : (float, deprecated) A value between -1 and 1, + // -1 maps to least positive sentiment, while 1 maps to the most positive + // one and the higher the score, the more positive the sentiment in the + // document is. Yet these values are relative to the training data, so + // e.g. if all data was positive then -1 is also positive (though + // the least). + // `sentiment_score` is not the same as "score" and "magnitude" + // from Sentiment Analysis in the Natural Language API. map metadata = 2; } -// Request message for -// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. +// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. message BatchPredictRequest { - // Name of the model requested to serve the batch prediction. - string name = 1; + // Required. Name of the model requested to serve the batch prediction. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. The input configuration for batch prediction. - BatchPredictInputConfig input_config = 3; + BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The Configuration specifying where output predictions should // be written. - BatchPredictOutputConfig output_config = 4; + BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED]; // Additional domain-specific parameters for the predictions, any string must // be up to 25000 characters long. // - // * For Text Classification: + // AutoML Natural Language Classification + // + // `score_threshold` + // : (float) A value from 0.0 to 1.0. 
When the model + // makes predictions for a text snippet, it will only produce results + // that have at least this confidence score. The default is 0.5. + // + // + // AutoML Vision Classification + // + // `score_threshold` + // : (float) A value from 0.0 to 1.0. When the model + // makes predictions for an image, it will only produce results that + // have at least this confidence score. The default is 0.5. + // + // AutoML Vision Object Detection + // + // `score_threshold` + // : (float) When Model detects objects on the image, + // it will only produce bounding boxes which have at least this + // confidence score. Value in 0 to 1 range, default is 0.5. // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for a text snippet, it will only produce results - // that have at least this confidence score. The default is 0.5. + // `max_bounding_box_count` + // : (int64) The maximum number of bounding + // boxes returned per image. The default is 100, the + // number of bounding boxes returned might be limited by the server. + // AutoML Video Intelligence Classification // - // * For Image Classification: + // `score_threshold` + // : (float) A value from 0.0 to 1.0. When the model + // makes predictions for a video, it will only produce results that + // have at least this confidence score. The default is 0.5. // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for an image, it will only produce results that - // have at least this confidence score. The default is 0.5. + // `segment_classification` + // : (boolean) Set to true to request + // segment-level classification. AutoML Video Intelligence returns + // labels and their confidence scores for the entire segment of the + // video that user specified in the request configuration. + // The default is true. 
// - // * For Image Object Detection: + // `shot_classification` + // : (boolean) Set to true to request shot-level + // classification. AutoML Video Intelligence determines the boundaries + // for each camera shot in the entire segment of the video that user + // specified in the request configuration. AutoML Video Intelligence + // then returns labels and their confidence scores for each detected + // shot, along with the start and end time of the shot. + // The default is false. + // + // WARNING: Model evaluation is not done for this classification type, + // the quality of it depends on training data, but there are no metrics + // provided to describe that quality. + // + // `1s_interval_classification` + // : (boolean) Set to true to request + // classification for a video at one-second intervals. AutoML Video + // Intelligence returns labels and their confidence scores for each + // second of the entire segment of the video that user specified in the + // request configuration. The default is false. + // + // WARNING: Model evaluation is not done for this classification + // type, the quality of it depends on training data, but there are no + // metrics provided to describe that quality. + // + // AutoML Video Intelligence Object Tracking + // + // `score_threshold` + // : (float) When Model detects objects on video frames, + // it will only produce bounding boxes which have at least this + // confidence score. Value in 0 to 1 range, default is 0.5. + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding + // boxes returned per image. The default is 100, the + // number of bounding boxes returned might be limited by the server. + // + // `min_bounding_box_size` + // : (float) Only bounding boxes with shortest edge + // at least that long as a relative value of video frame size are + // returned. Value in 0 to 1 range. Default is 0. 
// - // `score_threshold` - (float) When Model detects objects on the image, - // it will only produce bounding boxes which have at least this - // confidence score. Value in 0 to 1 range, default is 0.5. - // `max_bounding_box_count` - (int64) No more than this number of bounding - // boxes will be produced per image. Default is 100, the - // requested value may be limited by server. map params = 5; } // Result of the Batch Predict. This message is returned in // [response][google.longrunning.Operation.response] of the operation returned -// by the -// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. +// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. message BatchPredictResult { // Additional domain-specific prediction response metadata. // - // * For Image Object Detection: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // image could have been returned. + // AutoML Vision Object Detection + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding boxes returned per image. + // + // AutoML Video Intelligence Object Tracking + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding boxes returned per frame. map metadata = 1; } diff --git a/google/cloud/automl_v1/proto/prediction_service_pb2.py b/google/cloud/automl_v1/proto/prediction_service_pb2.py index 7e41c82a..c7a138eb 100644 --- a/google/cloud/automl_v1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1/proto/prediction_service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/prediction_service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -17,6 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1.proto import ( annotation_payload_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__payload__pb2, @@ -39,15 +37,12 @@ name="google/cloud/automl_v1/proto/prediction_service.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1B\026PredictionServiceProtoP\001Z The dataset has // translation_dataset_metadata. @@ -275,98 +342,121 @@ message ListDatasetsRequest { // A token identifying a page of results for the server to return // Typically obtained via - // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] - // of the previous + // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] of the previous // [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] call. string page_token = 6; } -// Response message for -// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. +// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. message ListDatasetsResponse { // The datasets read. repeated Dataset datasets = 1; // A token to retrieve next page of results. - // Pass to - // [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] - // to obtain that page. 
+ // Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] to obtain that page. string next_page_token = 2; } -// Request message for -// [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] +// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] message UpdateDatasetRequest { - // The dataset which replaces the resource on the server. - Dataset dataset = 1; + // Required. The dataset which replaces the resource on the server. + Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. +// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. message DeleteDatasetRequest { - // The resource name of the dataset to delete. - string name = 1; + // Required. The resource name of the dataset to delete. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; } -// Request message for -// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. +// Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. message ImportDataRequest { // Required. Dataset name. Dataset must already exist. All imported // annotations and examples will be added. - string name = 1; + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; // Required. The desired input location and its domain specific semantics, // if any. 
- InputConfig input_config = 3; + InputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. +// Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. message ExportDataRequest { // Required. The resource name of the dataset. - string name = 1; + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; // Required. The desired output location. - OutputConfig output_config = 3; + OutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. +// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. message GetAnnotationSpecRequest { - // The resource name of the annotation spec to retrieve. - string name = 1; + // Required. The resource name of the annotation spec to retrieve. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/AnnotationSpec" + } + ]; } -// Request message for -// [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. +// Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. message CreateModelRequest { - // Resource name of the parent project where the model is being created. - string parent = 1; - - // The model to create. - Model model = 4; + // Required. Resource name of the parent project where the model is being created. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // Required. The model to create. 
+ Model model = 4 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. +// Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. message GetModelRequest { - // Resource name of the model. - string name = 1; + // Required. Resource name of the model. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. +// Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. message ListModelsRequest { - // Resource name of the project, from which to list the models. - string parent = 1; + // Required. Resource name of the project, from which to list the models. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; // An expression for filtering the results of the request. // // * `model_metadata` - for existence of the case (e.g. - // image_classification_model_metadata:*). + // video_classification_model_metadata:*). // * `dataset_id` - for = or !=. Some examples of using the filter are: // // * `image_classification_model_metadata:*` --> The model has @@ -379,94 +469,112 @@ message ListModelsRequest { // A token identifying a page of results for the server to return // Typically obtained via - // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] - // of the previous + // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] of the previous // [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call. string page_token = 6; } -// Response message for -// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. 
+// Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. message ListModelsResponse { // List of models in the requested page. repeated Model model = 1; // A token to retrieve next page of results. - // Pass to - // [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] - // to obtain that page. + // Pass to [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] to obtain that page. string next_page_token = 2; } -// Request message for -// [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. +// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. message DeleteModelRequest { - // Resource name of the model being deleted. - string name = 1; + // Required. Resource name of the model being deleted. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] +// Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] message UpdateModelRequest { - // The model which replaces the resource on the server. - Model model = 1; + // Required. The model which replaces the resource on the server. + Model model = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. +// Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. message DeployModelRequest { // The per-domain specific deployment parameters. oneof model_deployment_metadata { // Model deployment metadata specific to Image Object Detection. 
- ImageObjectDetectionModelDeploymentMetadata - image_object_detection_model_deployment_metadata = 2; + ImageObjectDetectionModelDeploymentMetadata image_object_detection_model_deployment_metadata = 2; // Model deployment metadata specific to Image Classification. - ImageClassificationModelDeploymentMetadata - image_classification_model_deployment_metadata = 4; + ImageClassificationModelDeploymentMetadata image_classification_model_deployment_metadata = 4; } - // Resource name of the model to deploy. - string name = 1; + // Required. Resource name of the model to deploy. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. +// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. message UndeployModelRequest { - // Resource name of the model to undeploy. - string name = 1; + // Required. Resource name of the model to undeploy. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need -// to be enabled for exporting, otherwise an error code will be returned. +// Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. +// Models need to be enabled for exporting, otherwise an error code will be +// returned. message ExportModelRequest { // Required. The resource name of the model to export. - string name = 1; + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. The desired output location and configuration. 
- ModelExportOutputConfig output_config = 3; + ModelExportOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. +// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. message GetModelEvaluationRequest { - // Resource name for the model evaluation. - string name = 1; + // Required. Resource name for the model evaluation. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/ModelEvaluation" + } + ]; } -// Request message for -// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. +// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. message ListModelEvaluationsRequest { - // Resource name of the model to list the model evaluations for. + // Required. Resource name of the model to list the model evaluations for. // If modelId is set as "-", this will list model evaluations from across all // models of the parent location. - string parent = 1; - - // An expression for filtering the results of the request. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; + + // Required. An expression for filtering the results of the request. // // * `annotation_spec_id` - for =, != or existence. See example below for // the last. @@ -477,31 +585,25 @@ message ListModelEvaluationsRequest { // annotation spec with ID different than 4. // * `NOT annotation_spec_id:*` --> The model evaluation was done for // aggregate of all annotation specs. - string filter = 3; + string filter = 3 [(google.api.field_behavior) = REQUIRED]; // Requested page size. int32 page_size = 4; // A token identifying a page of results for the server to return. 
// Typically obtained via - // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] - // of the previous - // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - // call. + // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] of the previous + // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call. string page_token = 6; } -// Response message for -// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. +// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. message ListModelEvaluationsResponse { // List of model evaluations in the requested page. repeated ModelEvaluation model_evaluation = 1; // A token to retrieve next page of results. - // Pass to the - // [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] - // field of a new - // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - // request to obtain that page. + // Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] field of a new + // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] request to obtain that page. string next_page_token = 2; } diff --git a/google/cloud/automl_v1/proto/service_pb2.py b/google/cloud/automl_v1/proto/service_pb2.py index c41a58f9..112edb18 100644 --- a/google/cloud/automl_v1/proto/service_pb2.py +++ b/google/cloud/automl_v1/proto/service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -17,6 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1.proto import ( annotation_payload_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__payload__pb2, @@ -52,15 +50,12 @@ name="google/cloud/automl_v1/proto/service.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1B\013AutoMlProtoP\001Z/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\x12\x93\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"9\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\x12\x86\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"2\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\x12\x97\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse"2\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\x12\x8c\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"2\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\x12\x99\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"?\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\x12\x96\x01
\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\x12\x9c\x01\n\rUndeployModel\x12,.google.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\x12\x96\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\x12\xb7\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"E\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\x12\xc8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"E\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z\n\x0cinput_config\x18\x03 \x01(\x0b\x32#.google.cloud.automl.v1.InputConfigB\x03\xe0\x41\x02"\x8a\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12@\n\routput_config\x18\x03 \x01(\x0b\x32$.google.cloud.automl.v1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"\x82\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 
\x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"[\n\x12ListModelsResponse\x12,\n\x05model\x18\x01 \x03(\x0b\x32\x1d.google.cloud.automl.v1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"}\n\x12UpdateModelRequest\x12\x31\n\x05model\x18\x01 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"\xe3\x02\n\x12\x44\x65ployModelRequest\x12\x7f\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32\x43.google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12|\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32\x42.google.cloud.automl.v1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x94\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12K\n\routput_config\x18\x03 \x01(\x0b\x32/.google.cloud.automl.v1.ModelExportOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x8e\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x13\n\x06\x66ilter\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x04 
\x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"z\n\x1cListModelEvaluationsResponse\x12\x41\n\x10model_evaluation\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xe8\x1b\n\x06\x41utoMl\x12\xcb\x01\n\rCreateDataset\x12,.google.cloud.automl.v1.CreateDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x37",/v1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\xca\x41\x1c\n\x07\x44\x61taset\x12\x11OperationMetadata\x12\x95\x01\n\nGetDataset\x12).google.cloud.automl.v1.GetDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset";\x82\xd3\xe4\x93\x02.\x12,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xa8\x01\n\x0cListDatasets\x12+.google.cloud.automl.v1.ListDatasetsRequest\x1a,.google.cloud.automl.v1.ListDatasetsResponse"=\x82\xd3\xe4\x93\x02.\x12,/v1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbb\x01\n\rUpdateDataset\x12,.google.cloud.automl.v1.UpdateDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset"[\x82\xd3\xe4\x93\x02?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x13\x64\x61taset,update_mask\x12\xc6\x01\n\rDeleteDataset\x12,.google.cloud.automl.v1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"h\x82\xd3\xe4\x93\x02.*,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdc\x01\n\nImportData\x12).google.cloud.automl.v1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\x01*\xda\x41\x11name,input_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdd\x01\n\nExportData\x12).google.cloud.automl.v1.ExportDataRequest\x1a\x1d.google.longrunning.Operation"\x84\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15goo
gle.protobuf.Empty\x12\x11OperationMetadata\x12\xbc\x01\n\x11GetAnnotationSpec\x12\x30.google.cloud.automl.v1.GetAnnotationSpecRequest\x1a&.google.cloud.automl.v1.AnnotationSpec"M\x82\xd3\xe4\x93\x02@\x12>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\xda\x41\x04name\x12\xbf\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"e\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x8d\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"9\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xa0\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse";\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xc0\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"f\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xad\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"S\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\xda\x41\x11model,update_mask\x12\xca\x01\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"p\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd0\x01\n\rUndeployModel\x12,.google.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"r\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMet
adata\x12\xd8\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"~\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbe\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"L\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xd8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"U\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\rparent,filter\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z The dataset has - translation\_dataset\_metadata. + ``dataset_metadata`` - for existence of the case ( + e.g. image_classification_dataset_metadata:*). Some examples + of using the filter are: - + ``translation_dataset_metadata:*`` –> The dataset has + translation_dataset_metadata. page_size: Requested page size. Server may return fewer results than requested. If unspecified, server will pick a default size. page_token: A token identifying a page of results for the server to return - Typically obtained via [ListDatasetsResponse.next\_page\_token - ][google.cloud.automl.v1.ListDatasetsResponse.next\_page\_toke - n] of the previous [AutoMl.ListDatasets][google.cloud.automl.v - 1.AutoMl.ListDatasets] call. + Typically obtained via [ListDatasetsResponse.next_page_token][ + google.cloud.automl.v1.ListDatasetsResponse.next_page_token] + of the previous [AutoMl.ListDatasets][google.cloud.automl.v1.A + utoMl.ListDatasets] call. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListDatasetsRequest) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsRequest) ListDatasetsResponse = _reflection.GeneratedProtocolMessageType( "ListDatasetsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTDATASETSRESPONSE, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTDATASETSRESPONSE, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. @@ -1463,61 +1460,62 @@ The datasets read. next_page_token: A token to retrieve next page of results. Pass to [ListDataset - sRequest.page\_token][google.cloud.automl.v1.ListDatasetsReque - st.page\_token] to obtain that page. + sRequest.page_token][google.cloud.automl.v1.ListDatasetsReques + t.page_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListDatasetsResponse) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsResponse) UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType( "UpdateDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDATASETREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATEDATASETREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] Attributes: dataset: - The dataset which replaces the resource on the server. + Required. The dataset which replaces the resource on the + server. update_mask: Required. The update mask applies to the resource. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UpdateDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateDatasetRequest) DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType( "DeleteDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEDATASETREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEDATASETREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. Attributes: name: - The resource name of the dataset to delete. + Required. The resource name of the dataset to delete. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteDatasetRequest) ImportDataRequest = _reflection.GeneratedProtocolMessageType( "ImportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDATAREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _IMPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. @@ -1530,17 +1528,17 @@ semantics, if any. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ImportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ImportDataRequest) ExportDataRequest = _reflection.GeneratedProtocolMessageType( "ExportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDATAREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. @@ -1551,113 +1549,115 @@ Required. The desired output location. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ExportDataRequest) GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType( "GetAnnotationSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETANNOTATIONSPECREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. Attributes: name: - The resource name of the annotation spec to retrieve. + Required. The resource name of the annotation spec to + retrieve. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetAnnotationSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetAnnotationSpecRequest) CreateModelRequest = _reflection.GeneratedProtocolMessageType( "CreateModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _CREATEMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. Attributes: parent: - Resource name of the parent project where the model is being - created. + Required. Resource name of the parent project where the model + is being created. model: - The model to create. + Required. The model to create. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.CreateModelRequest) - ), + }, ) _sym_db.RegisterMessage(CreateModelRequest) GetModelRequest = _reflection.GeneratedProtocolMessageType( "GetModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. Attributes: name: - Resource name of the model. + Required. Resource name of the model. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetModelRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelRequest) ListModelsRequest = _reflection.GeneratedProtocolMessageType( "ListModelsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTMODELSREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. Attributes: parent: - Resource name of the project, from which to list the models. + Required. Resource name of the project, from which to list the + models. filter: An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case (e.g. - image\_classification\_model\_metadata:\*). - ``dataset_id`` + ``model_metadata`` - for existence of the case ( + e.g. video_classification_model_metadata:*). - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. - ``dataset_id=5`` - --> The model was created from a dataset with ID 5. 
+ ``image_classification_model_metadata:*`` –> The model has + image_classification_model_metadata. - ``dataset_id=5`` –> + The model was created from a dataset with ID 5. page_size: Requested page size. page_token: A token identifying a page of results for the server to return - Typically obtained via [ListModelsResponse.next\_page\_token][ - google.cloud.automl.v1.ListModelsResponse.next\_page\_token] - of the previous + Typically obtained via [ListModelsResponse.next_page_token][go + ogle.cloud.automl.v1.ListModelsResponse.next_page_token] of + the previous [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelsRequest) ListModelsResponse = _reflection.GeneratedProtocolMessageType( "ListModelsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSRESPONSE, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTMODELSRESPONSE, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. @@ -1666,61 +1666,61 @@ List of models in the requested page. next_page_token: A token to retrieve next page of results. Pass to [ListModelsR - equest.page\_token][google.cloud.automl.v1.ListModelsRequest.p - age\_token] to obtain that page. + equest.page_token][google.cloud.automl.v1.ListModelsRequest.pa + ge_token] to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelsResponse) DeleteModelRequest = _reflection.GeneratedProtocolMessageType( "DeleteModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. Attributes: name: - Resource name of the model being deleted. + Required. Resource name of the model being deleted. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteModelRequest) UpdateModelRequest = _reflection.GeneratedProtocolMessageType( "UpdateModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATEMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] Attributes: model: - The model which replaces the resource on the server. + Required. The model which replaces the resource on the server. update_mask: Required. The update mask applies to the resource. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UpdateModelRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateModelRequest) DeployModelRequest = _reflection.GeneratedProtocolMessageType( "DeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. @@ -1732,39 +1732,39 @@ image_classification_model_deployment_metadata: Model deployment metadata specific to Image Classification. name: - Resource name of the model to deploy. + Required. Resource name of the model to deploy. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeployModelRequest) UndeployModelRequest = _reflection.GeneratedProtocolMessageType( "UndeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_UNDEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UNDEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. Attributes: name: - Resource name of the model to undeploy. + Required. Resource name of the model to undeploy. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UndeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(UndeployModelRequest) ExportModelRequest = _reflection.GeneratedProtocolMessageType( "ExportModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an error code will be returned. @@ -1777,74 +1777,75 @@ Required. The desired output location and configuration. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportModelRequest) - ), + }, ) _sym_db.RegisterMessage(ExportModelRequest) GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType( "GetModelEvaluationRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELEVALUATIONREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. Attributes: name: - Resource name for the model evaluation. + Required. Resource name for the model evaluation. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetModelEvaluationRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelEvaluationRequest) ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. Attributes: parent: - Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations - from across all models of the parent location. + Required. Resource name of the model to list the model + evaluations for. If modelId is set as “-”, this will list + model evaluations from across all models of the parent + location. filter: - An expression for filtering the results of the request. - - ``annotation_spec_id`` - for =, != or existence. See example - below for the last. Some examples of using the filter are: - - ``annotation_spec_id!=4`` --> The model evaluation was done - for annotation spec with ID different than 4. - ``NOT - annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. + Required. An expression for filtering the results of the + request. - ``annotation_spec_id`` - for =, != or existence. + See example below for the last. Some examples of using the + filter are: - ``annotation_spec_id!=4`` –> The model + evaluation was done for annotation spec with ID different + than 4. - ``NOT annotation_spec_id:*`` –> The model + evaluation was done for aggregate of all annotation specs. page_size: Requested page size. page_token: A token identifying a page of results for the server to return. 
Typically obtained via [ListModelEvaluationsResponse.n - ext\_page\_token][google.cloud.automl.v1.ListModelEvaluationsR - esponse.next\_page\_token] of the previous [AutoMl.ListModelEv - aluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] + ext_page_token][google.cloud.automl.v1.ListModelEvaluationsRes + ponse.next_page_token] of the previous [AutoMl.ListModelEvalua + tions][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelEvaluationsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsRequest) ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSRESPONSE, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. @@ -1853,29 +1854,53 @@ List of model evaluations in the requested page. next_page_token: A token to retrieve next page of results. Pass to the [ListMod - elEvaluationsRequest.page\_token][google.cloud.automl.v1.ListM - odelEvaluationsRequest.page\_token] field of a new [AutoMl.Lis - tModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEval - uations] request to obtain that page. + elEvaluationsRequest.page_token][google.cloud.automl.v1.ListMo + delEvaluationsRequest.page_token] field of a new [AutoMl.ListM + odelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvalua + tions] request to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelEvaluationsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsResponse) DESCRIPTOR._options = None +_CREATEDATASETREQUEST.fields_by_name["parent"]._options = None +_CREATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_GETDATASETREQUEST.fields_by_name["name"]._options = None +_LISTDATASETSREQUEST.fields_by_name["parent"]._options = None +_UPDATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_UPDATEDATASETREQUEST.fields_by_name["update_mask"]._options = None +_DELETEDATASETREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["input_config"]._options = None +_EXPORTDATAREQUEST.fields_by_name["name"]._options = None +_EXPORTDATAREQUEST.fields_by_name["output_config"]._options = None +_GETANNOTATIONSPECREQUEST.fields_by_name["name"]._options = None +_CREATEMODELREQUEST.fields_by_name["parent"]._options = None +_CREATEMODELREQUEST.fields_by_name["model"]._options = None +_GETMODELREQUEST.fields_by_name["name"]._options = None +_LISTMODELSREQUEST.fields_by_name["parent"]._options = None +_DELETEMODELREQUEST.fields_by_name["name"]._options = None +_UPDATEMODELREQUEST.fields_by_name["model"]._options = None +_UPDATEMODELREQUEST.fields_by_name["update_mask"]._options = None +_DEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_UNDEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["output_config"]._options = None +_GETMODELEVALUATIONREQUEST.fields_by_name["name"]._options = None +_LISTMODELEVALUATIONSREQUEST.fields_by_name["parent"]._options = None +_LISTMODELEVALUATIONSREQUEST.fields_by_name["filter"]._options = None _AUTOML = _descriptor.ServiceDescriptor( name="AutoMl", full_name="google.cloud.automl.v1.AutoMl", file=DESCRIPTOR, index=0, - serialized_options=_b( - 
"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), - serialized_start=2501, - serialized_end=5458, + serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + serialized_start=3236, + serialized_end=6796, methods=[ _descriptor.MethodDescriptor( name="CreateDataset", @@ -1884,9 +1909,7 @@ containing_service=None, input_type=_CREATEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0027",/v1/{parent=projects/*/locations/*}/datasets:\007dataset' - ), + serialized_options=b'\202\323\344\223\0027",/v1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset\312A\034\n\007Dataset\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="GetDataset", @@ -1895,9 +1918,7 @@ containing_service=None, input_type=_GETDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\002.\022,/v1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\002.\022,/v1/{name=projects/*/locations/*/datasets/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListDatasets", @@ -1906,9 +1927,7 @@ containing_service=None, input_type=_LISTDATASETSREQUEST, output_type=_LISTDATASETSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002.\022,/v1/{parent=projects/*/locations/*}/datasets" - ), + serialized_options=b"\202\323\344\223\002.\022,/v1/{parent=projects/*/locations/*}/datasets\332A\006parent", ), _descriptor.MethodDescriptor( name="UpdateDataset", @@ -1917,9 +1936,7 @@ containing_service=None, input_type=_UPDATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\002?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset" - ), + 
serialized_options=b"\202\323\344\223\002?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\023dataset,update_mask", ), _descriptor.MethodDescriptor( name="DeleteDataset", @@ -1928,9 +1945,7 @@ containing_service=None, input_type=_DELETEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002.*,/v1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\002.*,/v1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", ), _descriptor.MethodDescriptor( name="ImportData", @@ -1939,9 +1954,7 @@ containing_service=None, input_type=_IMPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\001*' - ), + serialized_options=b'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\001*\332A\021name,input_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="ExportData", @@ -1950,9 +1963,7 @@ containing_service=None, input_type=_EXPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\001*' - ), + serialized_options=b'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="GetAnnotationSpec", @@ -1961,9 +1972,7 @@ containing_service=None, input_type=_GETANNOTATIONSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2._ANNOTATIONSPEC, - serialized_options=_b( - "\202\323\344\223\002@\022>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}" - ), + 
serialized_options=b"\202\323\344\223\002@\022>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\332A\004name", ), _descriptor.MethodDescriptor( name="CreateModel", @@ -1972,9 +1981,7 @@ containing_service=None, input_type=_CREATEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0023"*/v1/{parent=projects/*/locations/*}/models:\005model' - ), + serialized_options=b'\202\323\344\223\0023"*/v1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="GetModel", @@ -1983,9 +1990,7 @@ containing_service=None, input_type=_GETMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, - serialized_options=_b( - "\202\323\344\223\002,\022*/v1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\002,\022*/v1/{name=projects/*/locations/*/models/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListModels", @@ -1994,9 +1999,7 @@ containing_service=None, input_type=_LISTMODELSREQUEST, output_type=_LISTMODELSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002,\022*/v1/{parent=projects/*/locations/*}/models" - ), + serialized_options=b"\202\323\344\223\002,\022*/v1/{parent=projects/*/locations/*}/models\332A\006parent", ), _descriptor.MethodDescriptor( name="DeleteModel", @@ -2005,9 +2008,7 @@ containing_service=None, input_type=_DELETEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002,**/v1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\002,**/v1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", ), _descriptor.MethodDescriptor( name="UpdateModel", @@ -2016,9 +2017,7 @@ containing_service=None, input_type=_UPDATEMODELREQUEST, 
output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, - serialized_options=_b( - "\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model" - ), + serialized_options=b"\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model\332A\021model,update_mask", ), _descriptor.MethodDescriptor( name="DeployModel", @@ -2027,9 +2026,7 @@ containing_service=None, input_type=_DEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*' - ), + serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="UndeployModel", @@ -2038,9 +2035,7 @@ containing_service=None, input_type=_UNDEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*' - ), + serialized_options=b'\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="ExportModel", @@ -2049,9 +2044,7 @@ containing_service=None, input_type=_EXPORTMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*' - ), + serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="GetModelEvaluation", @@ -2060,9 +2053,7 @@ containing_service=None, input_type=_GETMODELEVALUATIONREQUEST, 
output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, - serialized_options=_b( - "\202\323\344\223\002?\022=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" - ), + serialized_options=b"\202\323\344\223\002?\022=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListModelEvaluations", @@ -2071,9 +2062,7 @@ containing_service=None, input_type=_LISTMODELEVALUATIONSREQUEST, output_type=_LISTMODELEVALUATIONSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002?\022=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations" - ), + serialized_options=b"\202\323\344\223\002?\022=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\rparent,filter", ), ], ) diff --git a/google/cloud/automl_v1/proto/service_pb2_grpc.py b/google/cloud/automl_v1/proto/service_pb2_grpc.py index 0ad90914..2f9a2837 100644 --- a/google/cloud/automl_v1/proto/service_pb2_grpc.py +++ b/google/cloud/automl_v1/proto/service_pb2_grpc.py @@ -194,6 +194,14 @@ def DeleteDataset(self, request, context): def ImportData(self, request, context): """Imports data into a dataset. + For Tables this method can only be called on an empty Dataset. + + For Tables: + * A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it completes. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -263,11 +271,11 @@ def DeployModel(self, request, context): same parameters has no effect. Deploying with different parametrs (as e.g. 
changing - [node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number]) + [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection; all other - domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage + deployment automatically. Returns an empty response in the [response][google.longrunning.Operation.response] field when it completes. @@ -279,7 +287,7 @@ def DeployModel(self, request, context): def UndeployModel(self, request, context): """Undeploys a model. If the model is not deployed this method has no effect. - Only applicable for Text Classification, Image Object Detection; + Only applicable for Text Classification, Image Object Detection and Tables; all other domains manage deployment automatically. Returns an empty response in the diff --git a/google/cloud/automl_v1/proto/text.proto b/google/cloud/automl_v1/proto/text.proto index bffe9634..667031b8 100644 --- a/google/cloud/automl_v1/proto/text.proto +++ b/google/cloud/automl_v1/proto/text.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; package google.cloud.automl.v1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1/classification.proto"; +import "google/api/annotations.proto"; option csharp_namespace = "Google.Cloud.AutoML.V1"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl"; @@ -41,20 +40,27 @@ message TextClassificationModelMetadata { } // Dataset metadata that is specific to text extraction -message TextExtractionDatasetMetadata {} +message TextExtractionDatasetMetadata { + +} // Model metadata that is specific to text extraction. -message TextExtractionModelMetadata {} +message TextExtractionModelMetadata { + +} // Dataset metadata for text sentiment. message TextSentimentDatasetMetadata { - // Required. A sentiment is expressed as an integer ordinal, where higher - // value means a more positive sentiment. The range of sentiments that will be - // used is between 0 and sentiment_max (inclusive on both ends), and all the - // values in the range must be represented in the dataset before a model can - // be created. sentiment_max value must be between 1 and 10 (inclusive). + // Required. A sentiment is expressed as an integer ordinal, where higher value + // means a more positive sentiment. The range of sentiments that will be used + // is between 0 and sentiment_max (inclusive on both ends), and all the values + // in the range must be represented in the dataset before a model can be + // created. + // sentiment_max value must be between 1 and 10 (inclusive). int32 sentiment_max = 1; } // Model metadata that is specific to text sentiment. 
-message TextSentimentModelMetadata {} +message TextSentimentModelMetadata { + +} diff --git a/google/cloud/automl_v1/proto/text_extraction.proto b/google/cloud/automl_v1/proto/text_extraction.proto index 02119f5c..37a31e71 100644 --- a/google/cloud/automl_v1/proto/text_extraction.proto +++ b/google/cloud/automl_v1/proto/text_extraction.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py index c1106e25..a93f26f8 100644 --- a/google/cloud/automl_v1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1/proto/text_extraction_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/text_extraction.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,8 @@ name="google/cloud/automl_v1/proto/text_extraction.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} + >>> response = client.delete_dataset(name) >>> - >>> response = client.create_dataset(parent, dataset) + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): The resource name of the project to create the dataset for. - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` + name (str): Required. The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -327,7 +333,7 @@ def create_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -337,22 +343,22 @@ def create_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "create_dataset" not in self._inner_api_calls: + if "delete_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "create_dataset" + "delete_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_dataset, - default_retry=self._method_configs["CreateDataset"].retry, - default_timeout=self._method_configs["CreateDataset"].timeout, + self.transport.delete_dataset, + default_retry=self._method_configs["DeleteDataset"].retry, + default_timeout=self._method_configs["DeleteDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) + request = service_pb2.DeleteDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -361,40 +367,63 @@ def create_dataset( ) metadata.append(routing_metadata) - return self._inner_api_calls["create_dataset"]( + operation = self._inner_api_calls["delete_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def update_dataset( + def import_data( self, - dataset, - update_mask=None, + name, + input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A ``schema_inference_version`` parameter must be explicitly set. 
+ Returns an empty response in the ``response`` field when it + completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> response = client.update_dataset(dataset) + >>> # TODO: Initialize `input_config`: + >>> input_config = {} + >>> + >>> response = client.import_data(name, input_config) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + name (str): Required. Dataset name. Dataset must already exist. All imported + annotations and examples will be added. + input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location and its domain specific semantics, + if any. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -405,7 +434,7 @@ def update_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -415,24 +444,22 @@ def update_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_dataset" not in self._inner_api_calls: + if "import_data" not in self._inner_api_calls: self._inner_api_calls[ - "update_dataset" + "import_data" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, + self.transport.import_data, + default_retry=self._method_configs["ImportData"].retry, + default_timeout=self._method_configs["ImportData"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask - ) + request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("dataset.name", dataset.name)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -441,19 +468,27 @@ def update_dataset( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_dataset"]( + operation = self._inner_api_calls["import_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def get_dataset( + def export_data( self, name, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a dataset. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. 
Example: >>> from google.cloud import automl_v1beta1 @@ -462,10 +497,26 @@ def get_dataset( >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> response = client.get_dataset(name) + >>> # TODO: Initialize `output_config`: + >>> output_config = {} + >>> + >>> response = client.export_data(name, output_config) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): The resource name of the dataset to retrieve. + name (str): Required. The resource name of the dataset. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -476,7 +527,7 @@ def get_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -486,17 +537,17 @@ def get_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_dataset" not in self._inner_api_calls: + if "export_data" not in self._inner_api_calls: self._inner_api_calls[ - "get_dataset" + "export_data" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_dataset, - default_retry=self._method_configs["GetDataset"].retry, - default_timeout=self._method_configs["GetDataset"].timeout, + self.transport.export_data, + default_retry=self._method_configs["ExportData"].retry, + default_timeout=self._method_configs["ExportData"].timeout, client_info=self._client_info, ) - request = service_pb2.GetDatasetRequest(name=name) + request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -510,58 +561,48 @@ def get_dataset( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_dataset"]( + operation = self._inner_api_calls["export_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_datasets( + def delete_model( self, - parent, - filter_=None, - page_size=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists datasets in a project. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # Iterate over all results - >>> for element in client.list_datasets(parent): - ... # process element - ... pass + >>> response = client.delete_model(name) >>> + >>> def callback(operation_future): + ... 
# Handle result. + ... result = operation_future.result() >>> - >>> # Alternatively: + >>> response.add_done_callback(callback) >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_datasets(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): The resource name of the project from which to list datasets. - filter_ (str): An expression for filtering the results of the request. - - - ``dataset_metadata`` - for existence of the case (e.g. - image\_classification\_dataset\_metadata:\*). Some examples of using - the filter are: - - - ``translation_dataset_metadata:*`` --> The dataset has - translation\_dataset\_metadata. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -572,10 +613,7 @@ def list_datasets( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -585,24 +623,22 @@ def list_datasets( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_datasets" not in self._inner_api_calls: + if "delete_model" not in self._inner_api_calls: self._inner_api_calls[ - "list_datasets" + "delete_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_datasets, - default_retry=self._method_configs["ListDatasets"].retry, - default_timeout=self._method_configs["ListDatasets"].timeout, + self.transport.delete_model, + default_retry=self._method_configs["DeleteModel"].retry, + default_timeout=self._method_configs["DeleteModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size - ) + request = service_pb2.DeleteModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -611,41 +647,44 @@ def list_datasets( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_datasets"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="datasets", - request_token_field="page_token", - response_token_field="next_page_token", + operation = self._inner_api_calls["delete_model"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, ) - return iterator - def delete_dataset( + def export_model( self, name, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. 
+ Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in + + ``ModelExportOutputConfig``. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.delete_dataset(name) + >>> # TODO: Initialize `output_config`: + >>> output_config = {} + >>> + >>> response = client.export_model(name, output_config) >>> >>> def callback(operation_future): ... # Handle result. @@ -657,7 +696,11 @@ def delete_dataset( >>> metadata = response.metadata() Args: - name (str): The resource name of the dataset to delete. + name (str): Required. The resource name of the model to export. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -678,17 +721,19 @@ def delete_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_dataset" not in self._inner_api_calls: + if "export_model" not in self._inner_api_calls: self._inner_api_calls[ - "delete_dataset" + "export_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_dataset, - default_retry=self._method_configs["DeleteDataset"].retry, - default_timeout=self._method_configs["DeleteDataset"].timeout, + self.transport.export_model, + default_retry=self._method_configs["ExportModel"].retry, + default_timeout=self._method_configs["ExportModel"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteDatasetRequest(name=name) + request = service_pb2.ExportModelRequest( + name=name, output_config=output_config, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -702,7 +747,7 @@ def delete_dataset( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_dataset"]( + operation = self._inner_api_calls["export_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( @@ -712,35 +757,39 @@ def delete_dataset( metadata_type=proto_operations_pb2.OperationMetadata, ) - def import_data( + def export_evaluated_examples( self, name, - input_config, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Imports data into a dataset. For Tables this method can only be called - on an empty Dataset. + Exports examples on which the model was evaluated (i.e. which were + in the TEST set of the dataset the model was created from), together + with their ground truth annotations and the annotations created + (predicted) by the model. The examples, ground truth and predictions are + exported in the state they were at the moment the model was evaluated. - For Tables: + This export is available only for 30 days since the model evaluation is + created. - - A ``schema_inference_version`` parameter must be explicitly set. 
- Returns an empty response in the ``response`` field when it - completes. + Currently only available for Tables. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # TODO: Initialize `input_config`: - >>> input_config = {} + >>> # TODO: Initialize `output_config`: + >>> output_config = {} >>> - >>> response = client.import_data(name, input_config) + >>> response = client.export_evaluated_examples(name, output_config) >>> >>> def callback(operation_future): ... # Handle result. @@ -752,13 +801,12 @@ def import_data( >>> metadata = response.metadata() Args: - name (str): Required. Dataset name. Dataset must already exist. All imported - annotations and examples will be added. - input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location and its domain specific semantics, - if any. + name (str): Required. The resource name of the model whose evaluated examples are to + be exported. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.InputConfig` + message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -779,17 +827,19 @@ def import_data( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "import_data" not in self._inner_api_calls: + if "export_evaluated_examples" not in self._inner_api_calls: self._inner_api_calls[ - "import_data" + "export_evaluated_examples" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.import_data, - default_retry=self._method_configs["ImportData"].retry, - default_timeout=self._method_configs["ImportData"].timeout, + self.transport.export_evaluated_examples, + default_retry=self._method_configs["ExportEvaluatedExamples"].retry, + default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, client_info=self._client_info, ) - request = service_pb2.ImportDataRequest(name=name, input_config=input_config) + request = service_pb2.ExportEvaluatedExamplesRequest( + name=name, output_config=output_config, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -803,7 +853,7 @@ def import_data( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["import_data"]( + operation = self._inner_api_calls["export_evaluated_examples"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( @@ -813,45 +863,59 @@ def import_data( metadata_type=proto_operations_pb2.OperationMetadata, ) - def export_data( + def list_model_evaluations( self, - name, - output_config, + parent, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Lists model evaluations. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} + >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.export_data(name, output_config) + >>> # Iterate over all results + >>> for element in client.list_model_evaluations(parent): + ... # process element + ... pass >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() >>> - >>> response.add_done_callback(callback) + >>> # Alternatively: >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> # Iterate over results one page at a time + >>> for page in client.list_model_evaluations(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - name (str): Required. The resource name of the dataset. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. + parent (str): Required. Resource name of the model to list the model evaluations for. + If modelId is set as "-", this will list model evaluations from across all + models of the parent location. + filter_ (str): An expression for filtering the results of the request. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` + - ``annotation_spec_id`` - for =, != or existence. See example below + for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was done for + annotation spec with ID different than 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation was done for + aggregate of all annotation specs. + page_size (int): The maximum number of resources contained in the + underlying API response. 
If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -862,7 +926,10 @@ def export_data( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -872,22 +939,24 @@ def export_data( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "export_data" not in self._inner_api_calls: + if "list_model_evaluations" not in self._inner_api_calls: self._inner_api_calls[ - "export_data" + "list_model_evaluations" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_data, - default_retry=self._method_configs["ExportData"].retry, - default_timeout=self._method_configs["ExportData"].timeout, + self.transport.list_model_evaluations, + default_retry=self._method_configs["ListModelEvaluations"].retry, + default_timeout=self._method_configs["ListModelEvaluations"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportDataRequest(name=name, output_config=output_config) + request = service_pb2.ListModelEvaluationsRequest( + parent=parent, filter=filter_, page_size=page_size, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -896,29 +965,31 @@ def export_data( ) 
metadata.append(routing_metadata) - operation = self._inner_api_calls["export_data"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_model_evaluations"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="model_evaluation", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def create_model( + def create_dataset( self, parent, - model, + dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a model. Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. + Creates a dataset. Example: >>> from google.cloud import automl_v1beta1 @@ -927,26 +998,17 @@ def create_model( >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> response = client.create_model(parent, model) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `dataset`: + >>> dataset = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.create_dataset(parent, dataset) Args: - parent (str): Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. + parent (str): Required. The resource name of the project to create the dataset for. 
+ dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset to create. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Model` + message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -957,7 +1019,7 @@ def create_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -967,17 +1029,17 @@ def create_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "create_model" not in self._inner_api_calls: + if "create_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "create_model" + "create_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_model, - default_retry=self._method_configs["CreateModel"].retry, - default_timeout=self._method_configs["CreateModel"].timeout, + self.transport.create_dataset, + default_retry=self._method_configs["CreateDataset"].retry, + default_timeout=self._method_configs["CreateDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.CreateModelRequest(parent=parent, model=model) + request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) if metadata is None: metadata = [] metadata = list(metadata) @@ -991,17 +1053,11 @@ def create_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["create_model"]( + return self._inner_api_calls["create_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, 
- model_pb2.Model, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def get_model( + def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -1009,19 +1065,19 @@ def get_model( metadata=None, ): """ - Gets a model. + Gets a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> response = client.get_model(name) + >>> response = client.get_dataset(name) Args: - name (str): Resource name of the model. + name (str): Required. The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1032,7 +1088,7 @@ def get_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Model` instance. + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1042,17 +1098,17 @@ def get_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_model" not in self._inner_api_calls: + if "get_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "get_model" + "get_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model, - default_retry=self._method_configs["GetModel"].retry, - default_timeout=self._method_configs["GetModel"].timeout, + self.transport.get_dataset, + default_retry=self._method_configs["GetDataset"].retry, + default_timeout=self._method_configs["GetDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.GetModelRequest(name=name) + request = service_pb2.GetDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1066,11 +1122,11 @@ def get_model( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_model"]( + return self._inner_api_calls["get_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def list_models( + def list_datasets( self, parent, filter_=None, @@ -1080,7 +1136,7 @@ def list_models( metadata=None, ): """ - Lists models. + Lists datasets in a project. Example: >>> from google.cloud import automl_v1beta1 @@ -1090,7 +1146,7 @@ def list_models( >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results - >>> for element in client.list_models(parent): + >>> for element in client.list_datasets(parent): ... # process element ... pass >>> @@ -1098,24 +1154,21 @@ def list_models( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_models(parent).pages: + >>> for page in client.list_datasets(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Resource name of the project, from which to list the models. + parent (str): Required. The resource name of the project from which to list datasets. filter_ (str): An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case (e.g. 
- video\_classification\_model\_metadata:\*). - - - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. + - ``dataset_metadata`` - for existence of the case (e.g. + image_classification_dataset_metadata:*). Some examples of using the + filter are: - - ``dataset_id=5`` --> The model was created from a dataset with ID 5. + - ``translation_dataset_metadata:*`` --> The dataset has + translation_dataset_metadata. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -1132,7 +1185,7 @@ def list_models( Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. + An iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. You can also iterate over the pages of the response using its `pages` property. @@ -1144,18 +1197,18 @@ def list_models( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_models" not in self._inner_api_calls: + if "list_datasets" not in self._inner_api_calls: self._inner_api_calls[ - "list_models" + "list_datasets" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_models, - default_retry=self._method_configs["ListModels"].retry, - default_timeout=self._method_configs["ListModels"].timeout, + self.transport.list_datasets, + default_retry=self._method_configs["ListDatasets"].retry, + default_timeout=self._method_configs["ListDatasets"].timeout, client_info=self._client_info, ) - request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size + request = service_pb2.ListDatasetsRequest( + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1173,50 +1226,48 @@ def list_models( iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls["list_models"], + self._inner_api_calls["list_datasets"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, - items_field="model", + items_field="datasets", request_token_field="page_token", response_token_field="next_page_token", ) return iterator - def delete_model( + def update_dataset( self, - name, + dataset, + update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> # TODO: Initialize `dataset`: + >>> dataset = {} >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... 
result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_dataset(dataset) Args: - name (str): Resource name of the model being deleted. + dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.Dataset` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1227,7 +1278,7 @@ def delete_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1237,22 +1288,24 @@ def delete_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_model" not in self._inner_api_calls: + if "update_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "delete_model" + "update_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, + self.transport.update_dataset, + default_retry=self._method_configs["UpdateDataset"].retry, + default_timeout=self._method_configs["UpdateDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteModelRequest(name=name) + request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("dataset.name", dataset.name)] except AttributeError: pass else: @@ -1261,66 +1314,31 @@ def delete_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_model"]( + return self._inner_api_calls["update_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def deploy_model( + def get_annotation_spec( self, name, - image_object_detection_model_deployment_metadata=None, - image_classification_model_deployment_metadata=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. 
- - Returns an empty response in the ``response`` field when it completes. + Gets an annotation spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.deploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_annotation_spec(name) Args: - name (str): Resource name of the model to deploy. - image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` - image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` + name (str): Required. The resource name of the annotation spec to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1331,7 +1349,7 @@ def deploy_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1341,28 +1359,17 @@ def deploy_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "deploy_model" not in self._inner_api_calls: + if "get_annotation_spec" not in self._inner_api_calls: self._inner_api_calls[ - "deploy_model" + "get_annotation_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.deploy_model, - default_retry=self._method_configs["DeployModel"].retry, - default_timeout=self._method_configs["DeployModel"].timeout, + self.transport.get_annotation_spec, + default_retry=self._method_configs["GetAnnotationSpec"].retry, + default_timeout=self._method_configs["GetAnnotationSpec"].timeout, client_info=self._client_info, ) - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - - request = service_pb2.DeployModelRequest( - name=name, - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) + request = service_pb2.GetAnnotationSpecRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1376,52 +1383,36 @@ def deploy_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["deploy_model"]( + return self._inner_api_calls["get_annotation_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def undeploy_model( + def 
get_table_spec( self, name, + field_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. - - Returns an empty response in the ``response`` field when it completes. + Gets a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.undeploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_table_spec(name) Args: - name (str): Resource name of the model to undeploy. + name (str): Required. The resource name of the table spec to retrieve. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1432,7 +1423,7 @@ def undeploy_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1442,17 +1433,17 @@ def undeploy_model( ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. - if "undeploy_model" not in self._inner_api_calls: + if "get_table_spec" not in self._inner_api_calls: self._inner_api_calls[ - "undeploy_model" + "get_table_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.undeploy_model, - default_retry=self._method_configs["UndeployModel"].retry, - default_timeout=self._method_configs["UndeployModel"].timeout, + self.transport.get_table_spec, + default_retry=self._method_configs["GetTableSpec"].retry, + default_timeout=self._method_configs["GetTableSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.UndeployModelRequest(name=name) + request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1466,37 +1457,56 @@ def undeploy_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["undeploy_model"]( + return self._inner_api_calls["get_table_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def get_model_evaluation( + def list_table_specs( self, - name, + parent, + field_mask=None, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a model evaluation. + Lists table specs in a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') + >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> response = client.get_model_evaluation(name) + >>> # Iterate over all results + >>> for element in client.list_table_specs(parent): + ... 
# process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_table_specs(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - name (str): Resource name for the model evaluation. + parent (str): Required. The resource name of the dataset to list table specs from. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + filter_ (str): Filter expression, see go/filtering. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1507,7 +1517,10 @@ def get_model_evaluation( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1517,22 +1530,24 @@ def get_model_evaluation( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_model_evaluation" not in self._inner_api_calls: + if "list_table_specs" not in self._inner_api_calls: self._inner_api_calls[ - "get_model_evaluation" + "list_table_specs" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model_evaluation, - default_retry=self._method_configs["GetModelEvaluation"].retry, - default_timeout=self._method_configs["GetModelEvaluation"].timeout, + self.transport.list_table_specs, + default_retry=self._method_configs["ListTableSpecs"].retry, + default_timeout=self._method_configs["ListTableSpecs"].timeout, client_info=self._client_info, ) - request = service_pb2.GetModelEvaluationRequest(name=name) + request = service_pb2.ListTableSpecsRequest( + parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -1541,55 +1556,52 @@ def get_model_evaluation( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_model_evaluation"]( - request, retry=retry, timeout=timeout, metadata=metadata + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_table_specs"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="table_specs", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def export_model( + def update_table_spec( self, - name, - output_config, + table_spec, + update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in - - ``ModelExportOutputConfig``. 
- - Returns an empty response in the ``response`` field when it completes. + Updates a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_model(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `table_spec`: + >>> table_spec = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_table_spec(table_spec) Args: - name (str): Required. The resource name of the model to export. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. + table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): Required. The table spec which replaces the resource on the server. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig` - retry (Optional[google.api_core.retry.Retry]): A retry object used + message :class:`~google.cloud.automl_v1beta1.types.TableSpec` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait @@ -1599,7 +1611,7 @@ def export_model( that is provided to the method. 
Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1609,22 +1621,24 @@ def export_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "export_model" not in self._inner_api_calls: + if "update_table_spec" not in self._inner_api_calls: self._inner_api_calls[ - "export_model" + "update_table_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_model, - default_retry=self._method_configs["ExportModel"].retry, - default_timeout=self._method_configs["ExportModel"].timeout, + self.transport.update_table_spec, + default_retry=self._method_configs["UpdateTableSpec"].retry, + default_timeout=self._method_configs["UpdateTableSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportModelRequest(name=name, output_config=output_config) + request = service_pb2.UpdateTableSpecRequest( + table_spec=table_spec, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("table_spec.name", table_spec.name)] except AttributeError: pass else: @@ -1633,66 +1647,36 @@ def export_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_model"]( + return self._inner_api_calls["update_table_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def export_evaluated_examples( + def get_column_spec( self, name, - output_config, + field_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports examples on which the model 
was evaluated (i.e. which were in - the TEST set of the dataset the model was created from), together with - their ground truth annotations and the annotations created (predicted) - by the model. The examples, ground truth and predictions are exported in - the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. - - Currently only available for Tables. - - Returns an empty response in the ``response`` field when it completes. + Gets a column spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_evaluated_examples(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_column_spec(name) Args: - name (str): Required. The resource name of the model whose evaluated examples are to - be exported. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. + name (str): Required. The resource name of the column spec to retrieve. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. 
If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1703,7 +1687,7 @@ def export_evaluated_examples( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1713,19 +1697,17 @@ def export_evaluated_examples( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "export_evaluated_examples" not in self._inner_api_calls: + if "get_column_spec" not in self._inner_api_calls: self._inner_api_calls[ - "export_evaluated_examples" + "get_column_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_evaluated_examples, - default_retry=self._method_configs["ExportEvaluatedExamples"].retry, - default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, + self.transport.get_column_spec, + default_retry=self._method_configs["GetColumnSpec"].retry, + default_timeout=self._method_configs["GetColumnSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config - ) + request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1739,19 +1721,14 @@ def export_evaluated_examples( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_evaluated_examples"]( + return self._inner_api_calls["get_column_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return 
google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def list_model_evaluations( + def list_column_specs( self, parent, + field_mask=None, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -1759,17 +1736,17 @@ def list_model_evaluations( metadata=None, ): """ - Lists model evaluations. + Lists column specs in a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') >>> >>> # Iterate over all results - >>> for element in client.list_model_evaluations(parent): + >>> for element in client.list_column_specs(parent): ... # process element ... pass >>> @@ -1777,26 +1754,18 @@ def list_model_evaluations( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_model_evaluations(parent).pages: + >>> for page in client.list_column_specs(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations from across all - models of the parent location. - filter_ (str): An expression for filtering the results of the request. - - - ``annotation_spec_id`` - for =, != or existence. See example below - for the last. - - Some examples of using the filter are: + parent (str): Required. The resource name of the table spec to list column specs from. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - ``annotation_spec_id!=4`` --> The model evaluation was done for - annotation spec with ID different than 4. 
- - ``NOT annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + filter_ (str): Filter expression, see go/filtering. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -1813,7 +1782,7 @@ def list_model_evaluations( Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. + An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. You can also iterate over the pages of the response using its `pages` property. @@ -1825,18 +1794,18 @@ def list_model_evaluations( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_model_evaluations" not in self._inner_api_calls: + if "list_column_specs" not in self._inner_api_calls: self._inner_api_calls[ - "list_model_evaluations" + "list_column_specs" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_model_evaluations, - default_retry=self._method_configs["ListModelEvaluations"].retry, - default_timeout=self._method_configs["ListModelEvaluations"].timeout, + self.transport.list_column_specs, + default_retry=self._method_configs["ListColumnSpecs"].retry, + default_timeout=self._method_configs["ListColumnSpecs"].timeout, client_info=self._client_info, ) - request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size + request = service_pb2.ListColumnSpecsRequest( + parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1854,39 +1823,48 @@ def list_model_evaluations( iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls["list_model_evaluations"], + self._inner_api_calls["list_column_specs"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, - items_field="model_evaluation", + items_field="column_specs", request_token_field="page_token", response_token_field="next_page_token", ) return iterator - def get_annotation_spec( + def update_column_spec( self, - name, + column_spec, + update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets an annotation spec. + Updates a column spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') + >>> # TODO: Initialize `column_spec`: + >>> column_spec = {} >>> - >>> response = client.get_annotation_spec(name) + >>> response = client.update_column_spec(column_spec) Args: - name (str): The resource name of the annotation spec to retrieve. + column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): Required. The column spec which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1897,7 +1875,7 @@ def get_annotation_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1907,22 +1885,24 @@ def get_annotation_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_annotation_spec" not in self._inner_api_calls: + if "update_column_spec" not in self._inner_api_calls: self._inner_api_calls[ - "get_annotation_spec" + "update_column_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_annotation_spec, - default_retry=self._method_configs["GetAnnotationSpec"].retry, - default_timeout=self._method_configs["GetAnnotationSpec"].timeout, + self.transport.update_column_spec, + default_retry=self._method_configs["UpdateColumnSpec"].retry, + default_timeout=self._method_configs["UpdateColumnSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.GetAnnotationSpecRequest(name=name) + request = service_pb2.UpdateColumnSpecRequest( + column_spec=column_spec, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("column_spec.name", column_spec.name)] except AttributeError: pass else: @@ -1931,36 +1911,51 @@ def get_annotation_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_annotation_spec"]( + return self._inner_api_calls["update_column_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def get_table_spec( + def create_model( self, - name, - field_mask=None, + parent, + model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a table spec. + Creates a model. Returns a Model in the ``response`` field when it + completes. When you create a model, several model evaluations are + created for it: a global evaluation, and one evaluation for each + annotation spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> response = client.get_table_spec(name) + >>> # TODO: Initialize `model`: + >>> model = {} + >>> + >>> response = client.create_model(parent, model) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): The resource name of the table spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + parent (str): Required. Resource name of the parent project where the model is being created. + model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): Required. The model to create. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1971,7 +1966,7 @@ def get_table_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1981,22 +1976,22 @@ def get_table_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_table_spec" not in self._inner_api_calls: + if "create_model" not in self._inner_api_calls: self._inner_api_calls[ - "get_table_spec" + "create_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table_spec, - default_retry=self._method_configs["GetTableSpec"].retry, - default_timeout=self._method_configs["GetTableSpec"].timeout, + self.transport.create_model, + default_retry=self._method_configs["CreateModel"].retry, + default_timeout=self._method_configs["CreateModel"].timeout, client_info=self._client_info, ) - request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask) + request = service_pb2.CreateModelRequest(parent=parent, model=model,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -2005,56 +2000,37 @@ def get_table_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_table_spec"]( + operation = self._inner_api_calls["create_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + model_pb2.Model, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_table_specs( + def get_model( self, - parent, - field_mask=None, - filter_=None, - page_size=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists table specs in a dataset. + Gets a model. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # Iterate over all results - >>> for element in client.list_table_specs(parent): - ... # process element - ... 
pass - >>> - >>> - >>> # Alternatively: + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_table_specs(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> response = client.get_model(name) Args: - parent (str): The resource name of the dataset to list table specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2065,10 +2041,7 @@ def list_table_specs( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2078,24 +2051,22 @@ def list_table_specs( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_table_specs" not in self._inner_api_calls: + if "get_model" not in self._inner_api_calls: self._inner_api_calls[ - "list_table_specs" + "get_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_table_specs, - default_retry=self._method_configs["ListTableSpecs"].retry, - default_timeout=self._method_configs["ListTableSpecs"].timeout, + self.transport.get_model, + default_retry=self._method_configs["GetModel"].retry, + default_timeout=self._method_configs["GetModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ListTableSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size - ) + request = service_pb2.GetModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2104,51 +2075,61 @@ def list_table_specs( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_table_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="table_specs", - request_token_field="page_token", - response_token_field="next_page_token", + return self._inner_api_calls["get_model"]( + request, retry=retry, timeout=timeout, metadata=metadata ) - return iterator - def update_table_spec( + def list_models( self, - table_spec, - update_mask=None, + parent, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a table spec. + Lists models. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `table_spec`: - >>> table_spec = {} + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> response = client.update_table_spec(table_spec) + >>> # Iterate over all results + >>> for element in client.list_models(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_models(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): The table spec which replaces the resource on the server. + parent (str): Required. Resource name of the project, from which to list the models. + filter_ (str): An expression for filtering the results of the request. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.TableSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + - ``model_metadata`` - for existence of the case (e.g. + video_classification_model_metadata:*). - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + - ``dataset_id`` - for = or !=. Some examples of using the filter are: + + - ``image_classification_model_metadata:*`` --> The model has + image_classification_model_metadata. + + - ``dataset_id=5`` --> The model was created from a dataset with ID 5. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. 
retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2159,7 +2140,10 @@ def update_table_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2169,24 +2153,24 @@ def update_table_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_table_spec" not in self._inner_api_calls: + if "list_models" not in self._inner_api_calls: self._inner_api_calls[ - "update_table_spec" + "list_models" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_table_spec, - default_retry=self._method_configs["UpdateTableSpec"].retry, - default_timeout=self._method_configs["UpdateTableSpec"].timeout, + self.transport.list_models, + default_retry=self._method_configs["ListModels"].retry, + default_timeout=self._method_configs["ListModels"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateTableSpecRequest( - table_spec=table_spec, update_mask=update_mask + request = service_pb2.ListModelsRequest( + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("table_spec.name", table_spec.name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -2195,36 +2179,72 @@ def update_table_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_table_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, 
+ method=functools.partial( + self._inner_api_calls["list_models"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="model", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def get_column_spec( + def deploy_model( self, name, - field_mask=None, + image_object_detection_model_deployment_metadata=None, + image_classification_model_deployment_metadata=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a column spec. + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing + + ``node_number``) will reset the deployment state without pausing the + model's availability. + + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.get_column_spec(name) + >>> response = client.deploy_model(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): The resource name of the column spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + name (str): Required. Resource name of the model to deploy. 
+ image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` + image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2235,7 +2255,7 @@ def get_column_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2245,17 +2265,28 @@ def get_column_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_column_spec" not in self._inner_api_calls: + if "deploy_model" not in self._inner_api_calls: self._inner_api_calls[ - "get_column_spec" + "deploy_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_column_spec, - default_retry=self._method_configs["GetColumnSpec"].retry, - default_timeout=self._method_configs["GetColumnSpec"].timeout, + self.transport.deploy_model, + default_retry=self._method_configs["DeployModel"].retry, + default_timeout=self._method_configs["DeployModel"].timeout, client_info=self._client_info, ) - request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask) + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. + google.api_core.protobuf_helpers.check_oneof( + image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, + image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + ) + + request = service_pb2.DeployModelRequest( + name=name, + image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, + image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2269,56 +2300,52 @@ def get_column_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_column_spec"]( + operation = self._inner_api_calls["deploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_column_specs( + def undeploy_model( self, - parent, - field_mask=None, - filter_=None, - page_size=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists 
column specs in a table spec. + Undeploys a model. If the model is not deployed this method has no + effect. + + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # Iterate over all results - >>> for element in client.list_column_specs(parent): - ... # process element - ... pass + >>> response = client.undeploy_model(name) >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() >>> - >>> # Alternatively: + >>> response.add_done_callback(callback) >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_column_specs(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): The resource name of the table spec to list column specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will be retried using a default configuration. @@ -2329,10 +2356,7 @@ def list_column_specs( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2342,24 +2366,22 @@ def list_column_specs( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_column_specs" not in self._inner_api_calls: + if "undeploy_model" not in self._inner_api_calls: self._inner_api_calls[ - "list_column_specs" + "undeploy_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_column_specs, - default_retry=self._method_configs["ListColumnSpecs"].retry, - default_timeout=self._method_configs["ListColumnSpecs"].timeout, + self.transport.undeploy_model, + default_retry=self._method_configs["UndeployModel"].retry, + default_timeout=self._method_configs["UndeployModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ListColumnSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size - ) + request = service_pb2.UndeployModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2368,51 +2390,37 @@ def list_column_specs( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_column_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="column_specs", - 
request_token_field="page_token", - response_token_field="next_page_token", + operation = self._inner_api_calls["undeploy_model"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, ) - return iterator - def update_column_spec( + def get_model_evaluation( self, - column_spec, - update_mask=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a column spec. + Gets a model evaluation. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `column_spec`: - >>> column_spec = {} + >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> - >>> response = client.update_column_spec(column_spec) + >>> response = client.get_model_evaluation(name) Args: - column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): The column spec which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + name (str): Required. Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2423,7 +2431,7 @@ def update_column_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. 
+ A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2433,24 +2441,22 @@ def update_column_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_column_spec" not in self._inner_api_calls: + if "get_model_evaluation" not in self._inner_api_calls: self._inner_api_calls[ - "update_column_spec" + "get_model_evaluation" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_column_spec, - default_retry=self._method_configs["UpdateColumnSpec"].retry, - default_timeout=self._method_configs["UpdateColumnSpec"].timeout, + self.transport.get_model_evaluation, + default_retry=self._method_configs["GetModelEvaluation"].retry, + default_timeout=self._method_configs["GetModelEvaluation"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateColumnSpecRequest( - column_spec=column_spec, update_mask=update_mask - ) + request = service_pb2.GetModelEvaluationRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("column_spec.name", column_spec.name)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2459,6 +2465,6 @@ def update_column_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_column_spec"]( + return self._inner_api_calls["get_model_evaluation"]( request, retry=retry, timeout=timeout, metadata=metadata ) diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py index d127ce6f..79d5b6ee 100644 --- a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py +++ b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py @@ -17,28 +17,8 @@ } }, "methods": { - "CreateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateDataset": 
{ - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetDataset": { - "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListDatasets": { - "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, "DeleteDataset": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, @@ -48,95 +28,115 @@ "retry_params_name": "default", }, "ExportData": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, - "CreateModel": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetModel": { - "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListModels": { - "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, "DeleteModel": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, - "DeployModel": { - "timeout_millis": 5000, + "ExportModel": { + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, - "UndeployModel": { - "timeout_millis": 5000, + "ExportEvaluatedExamples": { + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, - "GetModelEvaluation": { - "timeout_millis": 5000, - "retry_codes_name": "idempotent", + "ListModelEvaluations": { + "timeout_millis": 50000, + "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, - "ExportModel": { - "timeout_millis": 5000, + "CreateDataset": { + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, - "ExportEvaluatedExamples": { - "timeout_millis": 5000, - "retry_codes_name": 
"non_idempotent", + "GetDataset": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", "retry_params_name": "default", }, - "ListModelEvaluations": { + "ListDatasets": { "timeout_millis": 50000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "UpdateDataset": { + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetAnnotationSpec": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "GetTableSpec": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "ListTableSpecs": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "UpdateTableSpec": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetColumnSpec": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "ListColumnSpecs": { - "timeout_millis": 5000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "UpdateColumnSpec": { - "timeout_millis": 5000, + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "CreateModel": { + "timeout_millis": 20000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, + "GetModel": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "ListModels": { + "timeout_millis": 50000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "DeployModel": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "UndeployModel": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + 
"retry_params_name": "default", + }, + "GetModelEvaluation": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, }, } } diff --git a/google/cloud/automl_v1beta1/gapic/enums.py b/google/cloud/automl_v1beta1/gapic/enums.py index 9f09f9ce..2560c4f9 100644 --- a/google/cloud/automl_v1beta1/gapic/enums.py +++ b/google/cloud/automl_v1beta1/gapic/enums.py @@ -36,8 +36,8 @@ class ClassificationType(enum.IntEnum): class NullValue(enum.IntEnum): """ - ``NullValue`` is a singleton enumeration to represent the null value for - the ``Value`` type union. + ``NullValue`` is a singleton enumeration to represent the null value + for the ``Value`` type union. The JSON representation for ``NullValue`` is JSON ``null``. @@ -61,17 +61,17 @@ class TypeCode(enum.IntEnum): ``date-time`` format, where ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). STRING (int): Encoded as ``string``. - ARRAY (int): Encoded as ``list``, where the list elements are represented according - to + ARRAY (int): Encoded as ``list``, where the list elements are represented + according to ``list_element_type``. - STRUCT (int): Encoded as ``struct``, where field values are represented according to - ``struct_type``. - CATEGORY (int): Values of this type are not further understood by AutoML, e.g. AutoML is - unable to tell the order of values (as it could with FLOAT64), or is - unable to say if one value contains another (as it could with STRING). - Encoded as ``string`` (bytes should be base64-encoded, as described in - RFC 4648, section 4). + STRUCT (int): Encoded as ``struct``, where field values are represented according + to ``struct_type``. + CATEGORY (int): Values of this type are not further understood by AutoML, e.g. + AutoML is unable to tell the order of values (as it could with FLOAT64), + or is unable to say if one value contains another (as it could with + STRING). 
Encoded as ``string`` (bytes should be base64-encoded, as + described in RFC 4648, section 4). """ TYPE_CODE_UNSPECIFIED = 0 @@ -94,12 +94,12 @@ class TextSegmentType(enum.IntEnum): TOKEN (int): The text segment is a token. e.g. word. PARAGRAPH (int): The text segment is a paragraph. FORM_FIELD (int): The text segment is a form field. - FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be treated as - child of another FORM\_FIELD TextSegment if its span is subspan of - another TextSegment with type FORM\_FIELD. - FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will be - treated as child of another FORM\_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM\_FIELD. + FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be + treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. + FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will + be treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. TABLE (int): The text segment is a whole table, including headers, and all rows. TABLE_HEADER (int): The text segment is a table's headers. It will be treated as child of another TABLE TextSegment if its span is subspan of another TextSegment @@ -108,8 +108,8 @@ class TextSegmentType(enum.IntEnum): another TABLE TextSegment if its span is subspan of another TextSegment with type TABLE. TABLE_CELL (int): The text segment is a cell in table. It will be treated as child of - another TABLE\_ROW TextSegment if its span is subspan of another - TextSegment with type TABLE\_ROW. + another TABLE_ROW TextSegment if its span is subspan of another + TextSegment with type TABLE_ROW. 
""" TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 diff --git a/google/cloud/automl_v1beta1/gapic/prediction_service_client.py b/google/cloud/automl_v1beta1/gapic/prediction_service_client.py index 57cedc90..2bcb31ca 100644 --- a/google/cloud/automl_v1beta1/gapic/prediction_service_client.py +++ b/google/cloud/automl_v1beta1/gapic/prediction_service_client.py @@ -36,26 +36,15 @@ from google.cloud.automl_v1beta1.gapic.transports import ( prediction_service_grpc_transport, ) -from google.cloud.automl_v1beta1.proto import annotation_spec_pb2 -from google.cloud.automl_v1beta1.proto import column_spec_pb2 from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.proto import dataset_pb2 -from google.cloud.automl_v1beta1.proto import image_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 -from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc -from google.cloud.automl_v1beta1.proto import service_pb2 -from google.cloud.automl_v1beta1.proto import service_pb2_grpc -from google.cloud.automl_v1beta1.proto import table_spec_pb2 from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version class PredictionServiceClient(object): @@ -63,7 +52,7 @@ class PredictionServiceClient(object): AutoML Prediction API. On any input that is documented to expect a string parameter in - snake\_case or kebab-case, either of those cases is accepted. 
+ snake_case or kebab-case, either of those cases is accepted. """ SERVICE_ADDRESS = "automl.googleapis.com:443" @@ -190,12 +179,12 @@ def __init__( self.transport = transport else: self.transport = prediction_service_grpc_transport.PredictionServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -206,7 +195,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -231,9 +220,9 @@ def predict( expected request payloads: - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Text Classification - TextSnippet, content up to 60,000 characters, UTF-8 encoded. - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 @@ -261,14 +250,14 @@ def predict( >>> response = client.predict(name, payload) Args: - name (str): Name of the model requested to serve the prediction. + name (str): Required. Name of the model requested to serve the prediction. payload (Union[dict, ~google.cloud.automl_v1beta1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.ExamplePayload` - params (dict[str -> str]): Additional domain-specific parameters, any string must be up to 25000 - characters long. + params (dict[str -> str]): Additional domain-specific parameters, any string must be up to + 25000 characters long. - For Image Classification: @@ -283,13 +272,9 @@ def predict( this number of bounding boxes will be returned in the response. Default is 100, the requested value may be limited by server. - - For Tables: ``feature_importance`` - (boolean) Whether - - [feature\_importance][[google.cloud.automl.v1beta1.TablesModelColumnInfo.feature\_importance] - should be populated in the returned - - [TablesAnnotation(-s)][[google.cloud.automl.v1beta1.TablesAnnotation]. - The default is false. + - For Tables: feature_importance - (boolean) Whether feature importance + should be populated in the returned TablesAnnotation. The default is + false. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -321,7 +306,7 @@ def predict( ) request = prediction_service_pb2.PredictRequest( - name=name, payload=payload, params=params + name=name, payload=payload, params=params, ) if metadata is None: metadata = [] @@ -345,7 +330,7 @@ def batch_predict( name, input_config, output_config, - params=None, + params, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -377,7 +362,10 @@ def batch_predict( >>> # TODO: Initialize `output_config`: >>> output_config = {} >>> - >>> response = client.batch_predict(name, input_config, output_config) + >>> # TODO: Initialize `params`: + >>> params = {} + >>> + >>> response = client.batch_predict(name, input_config, output_config, params) >>> >>> def callback(operation_future): ... # Handle result. 
@@ -389,7 +377,7 @@ def batch_predict( >>> metadata = response.metadata() Args: - name (str): Name of the model requested to serve the batch prediction. + name (str): Required. Name of the model requested to serve the batch prediction. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction. If a dict is provided, it must be of the same form as the protobuf @@ -399,8 +387,8 @@ def batch_predict( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig` - params (dict[str -> str]): Additional domain-specific parameters for the predictions, any string - must be up to 25000 characters long. + params (dict[str -> str]): Required. Additional domain-specific parameters for the predictions, + any string must be up to 25000 characters long. - For Text Classification: @@ -423,22 +411,24 @@ def batch_predict( bounding boxes will be produced per image. Default is 100, the requested value may be limited by server. - - For Video Classification : ``score_threshold`` - (float) A value from - 0.0 to 1.0. When the model makes predictions for a video, it will - only produce results that have at least this confidence score. The - default is 0.5. ``segment_classification`` - (boolean) Set to true to - request segment-level classification. AutoML Video Intelligence - returns labels and their confidence scores for the entire segment of - the video that user specified in the request configuration. The - default is "true". ``shot_classification`` - (boolean) Set to true to - request shot-level classification. AutoML Video Intelligence - determines the boundaries for each camera shot in the entire segment - of the video that user specified in the request configuration. AutoML - Video Intelligence then returns labels and their confidence scores - for each detected shot, along with the start and end time of the - shot. 
WARNING: Model evaluation is not done for this classification - type, the quality of it depends on training data, but there are no - metrics provided to describe that quality. The default is "false". + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model + makes predictions for a video, it will only produce results that have + at least this confidence score. The default is 0.5. + ``segment_classification`` - (boolean) Set to true to request + segment-level classification. AutoML Video Intelligence returns + labels and their confidence scores for the entire segment of the + video that user specified in the request configuration. The default + is "true". ``shot_classification`` - (boolean) Set to true to request + shot-level classification. AutoML Video Intelligence determines the + boundaries for each camera shot in the entire segment of the video + that user specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence scores for each + detected shot, along with the start and end time of the shot. + WARNING: Model evaluation is not done for this classification type, + the quality of it depends on training data, but there are no metrics + provided to describe that quality. The default is "false". ``1s_interval_classification`` - (boolean) Set to true to request classification for a video at one-second intervals. AutoML Video Intelligence returns labels and their confidence scores for each @@ -448,15 +438,22 @@ def batch_predict( there are no metrics provided to describe that quality. The default is "false". - - For Video Object Tracking: ``score_threshold`` - (float) When Model - detects objects on video frames, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to 1 range, - default is 0.5. ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned per frame. 
Default is - 100, the requested value may be limited by server. - ``min_bounding_box_size`` - (float) Only bounding boxes with shortest - edge at least that long as a relative value of video frame size will - be returned. Value in 0 to 1 range. Default is 0. + - For Tables: + + feature_importance - (boolean) Whether feature importance should be + populated in the returned TablesAnnotations. The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects objects on video + frames, it will only produce bounding boxes which have at least this + confidence score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than this number of + bounding boxes will be returned per frame. Default is 100, the + requested value may be limited by server. ``min_bounding_box_size`` - + (float) Only bounding boxes with shortest edge at least that long as + a relative value of video frame size will be returned. Value in 0 to + 1 range. Default is 0. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. diff --git a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py index 106c3c6c..5a8e1b5d 100644 --- a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py +++ b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. 
@@ -72,7 +72,9 @@ def __init__( # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. - self._stubs = {"auto_ml_stub": service_pb2_grpc.AutoMlStub(channel)} + self._stubs = { + "auto_ml_stub": service_pb2_grpc.AutoMlStub(channel), + } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), @@ -114,361 +116,362 @@ def channel(self): return self._channel @property - def create_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. + def delete_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - Creates a dataset. + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].CreateDataset + return self._stubs["auto_ml_stub"].DeleteDataset @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. + def import_data(self): + """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - Updates a dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateDataset + return self._stubs["auto_ml_stub"].ImportData @property - def get_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. + def export_data(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_data`. 
- Gets a dataset. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetDataset + return self._stubs["auto_ml_stub"].ExportData @property - def list_datasets(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. + def delete_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. - Lists datasets in a project. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListDatasets + return self._stubs["auto_ml_stub"].DeleteModel @property - def delete_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. + def export_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in + + ``ModelExportOutputConfig``. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeleteDataset + return self._stubs["auto_ml_stub"].ExportModel @property - def import_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.import_data`. 
+ def export_evaluated_examples(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`. - Imports data into a dataset. For Tables this method can only be called - on an empty Dataset. + Exports examples on which the model was evaluated (i.e. which were + in the TEST set of the dataset the model was created from), together + with their ground truth annotations and the annotations created + (predicted) by the model. The examples, ground truth and predictions are + exported in the state they were at the moment the model was evaluated. - For Tables: + This export is available only for 30 days since the model evaluation is + created. - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. + Currently only available for Tables. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ImportData + return self._stubs["auto_ml_stub"].ExportEvaluatedExamples @property - def export_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_data`. + def list_model_evaluations(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Lists model evaluations. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportData + return self._stubs["auto_ml_stub"].ListModelEvaluations @property - def create_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_model`. + def create_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. - Creates a model. 
Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. + Creates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].CreateModel + return self._stubs["auto_ml_stub"].CreateDataset @property - def get_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_model`. + def get_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. - Gets a model. + Gets a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetModel + return self._stubs["auto_ml_stub"].GetDataset @property - def list_models(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_models`. + def list_datasets(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. - Lists models. + Lists datasets in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListModels + return self._stubs["auto_ml_stub"].ListDatasets @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + def update_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
""" - return self._stubs["auto_ml_stub"].DeleteModel + return self._stubs["auto_ml_stub"].UpdateDataset @property - def deploy_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. + def get_annotation_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_annotation_spec`. - Returns an empty response in the ``response`` field when it completes. + Gets an annotation spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeployModel + return self._stubs["auto_ml_stub"].GetAnnotationSpec @property - def undeploy_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.undeploy_model`. - - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. + def get_table_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_table_spec`. - Returns an empty response in the ``response`` field when it completes. + Gets a table spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UndeployModel + return self._stubs["auto_ml_stub"].GetTableSpec @property - def get_model_evaluation(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`. 
+ def list_table_specs(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_table_specs`. - Gets a model evaluation. + Lists table specs in a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetModelEvaluation + return self._stubs["auto_ml_stub"].ListTableSpecs @property - def export_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in - - ``ModelExportOutputConfig``. + def update_table_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_table_spec`. - Returns an empty response in the ``response`` field when it completes. + Updates a table spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportModel + return self._stubs["auto_ml_stub"].UpdateTableSpec @property - def export_evaluated_examples(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`. - - Exports examples on which the model was evaluated (i.e. which were in - the TEST set of the dataset the model was created from), together with - their ground truth annotations and the annotations created (predicted) - by the model. The examples, ground truth and predictions are exported in - the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. - - Currently only available for Tables. + def get_column_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_column_spec`. - Returns an empty response in the ``response`` field when it completes. + Gets a column spec. 
Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportEvaluatedExamples + return self._stubs["auto_ml_stub"].GetColumnSpec @property - def list_model_evaluations(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. + def list_column_specs(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_column_specs`. - Lists model evaluations. + Lists column specs in a table spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListModelEvaluations + return self._stubs["auto_ml_stub"].ListColumnSpecs @property - def get_annotation_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_annotation_spec`. + def update_column_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_column_spec`. - Gets an annotation spec. + Updates a column spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetAnnotationSpec + return self._stubs["auto_ml_stub"].UpdateColumnSpec @property - def get_table_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_table_spec`. + def create_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.create_model`. - Gets a table spec. + Creates a model. Returns a Model in the ``response`` field when it + completes. When you create a model, several model evaluations are + created for it: a global evaluation, and one evaluation for each + annotation spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
""" - return self._stubs["auto_ml_stub"].GetTableSpec + return self._stubs["auto_ml_stub"].CreateModel @property - def list_table_specs(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_table_specs`. + def get_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_model`. - Lists table specs in a dataset. + Gets a model. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListTableSpecs + return self._stubs["auto_ml_stub"].GetModel @property - def update_table_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_table_spec`. + def list_models(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_models`. - Updates a table spec. + Lists models. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateTableSpec + return self._stubs["auto_ml_stub"].ListModels @property - def get_column_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_column_spec`. + def deploy_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - Gets a column spec. + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing + + ``node_number``) will reset the deployment state without pausing the + model's availability. + + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
""" - return self._stubs["auto_ml_stub"].GetColumnSpec + return self._stubs["auto_ml_stub"].DeployModel @property - def list_column_specs(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_column_specs`. + def undeploy_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.undeploy_model`. - Lists column specs in a table spec. + Undeploys a model. If the model is not deployed this method has no + effect. + + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListColumnSpecs + return self._stubs["auto_ml_stub"].UndeployModel @property - def update_column_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_column_spec`. + def get_model_evaluation(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`. - Updates a column spec. + Gets a model evaluation. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateColumnSpec + return self._stubs["auto_ml_stub"].GetModelEvaluation diff --git a/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py index 69ebca84..6f2b37b1 100644 --- a/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py +++ b/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). 
if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -75,7 +75,7 @@ def __init__( self._stubs = { "prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub( channel - ) + ), } # Because this API includes a method that returns a @@ -126,9 +126,9 @@ def predict(self): expected request payloads: - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Text Classification - TextSnippet, content up to 60,000 characters, UTF-8 encoded. - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload.proto b/google/cloud/automl_v1beta1/proto/annotation_payload.proto index 7cc2860f..f62bb269 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_payload.proto +++ b/google/cloud/automl_v1beta1/proto/annotation_payload.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py index f8036318..9843fde7 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/annotation_payload.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -41,12 +38,8 @@ name="google/cloud/automl_v1beta1/proto/annotation_payload.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x05\n\x11\x41nnotationPayload\x12I\n\x0btranslation\x18\x02 \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TranslationAnnotationH\x00\x12O\n\x0e\x63lassification\x18\x03 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotationH\x00\x12]\n\x16image_object_detection\x18\x04 \x01(\x0b\x32;.google.cloud.automl.v1beta1.ImageObjectDetectionAnnotationH\x00\x12Z\n\x14video_classification\x18\t \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoClassificationAnnotationH\x00\x12[\n\x15video_object_tracking\x18\x08 \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoObjectTrackingAnnotationH\x00\x12P\n\x0ftext_extraction\x18\x06 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TextExtractionAnnotationH\x00\x12N\n\x0etext_sentiment\x18\x07 
\x01(\x0b\x32\x34.google.cloud.automl.v1beta1.TextSentimentAnnotationH\x00\x12?\n\x06tables\x18\n \x01(\x0b\x32-.google.cloud.automl.v1beta1.TablesAnnotationH\x00\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x05 \x01(\tB\x08\n\x06\x64\x65tailB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x05\n\x11\x41nnotationPayload\x12I\n\x0btranslation\x18\x02 \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TranslationAnnotationH\x00\x12O\n\x0e\x63lassification\x18\x03 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotationH\x00\x12]\n\x16image_object_detection\x18\x04 \x01(\x0b\x32;.google.cloud.automl.v1beta1.ImageObjectDetectionAnnotationH\x00\x12Z\n\x14video_classification\x18\t \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoClassificationAnnotationH\x00\x12[\n\x15video_object_tracking\x18\x08 \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoObjectTrackingAnnotationH\x00\x12P\n\x0ftext_extraction\x18\x06 
\x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TextExtractionAnnotationH\x00\x12N\n\x0etext_sentiment\x18\x07 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.TextSentimentAnnotationH\x00\x12?\n\x06tables\x18\n \x01(\x0b\x32-.google.cloud.automl.v1beta1.TablesAnnotationH\x00\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x05 \x01(\tB\x08\n\x06\x64\x65tailB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR, @@ -220,7 +213,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -238,7 +231,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -262,7 +255,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=470, serialized_end=1212, @@ -362,10 +355,10 @@ AnnotationPayload = _reflection.GeneratedProtocolMessageType( "AnnotationPayload", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATIONPAYLOAD, - __module__="google.cloud.automl_v1beta1.proto.annotation_payload_pb2", - __doc__="""Contains annotation information that is relevant to + { + "DESCRIPTOR": _ANNOTATIONPAYLOAD, + "__module__": "google.cloud.automl_v1beta1.proto.annotation_payload_pb2", + "__doc__": """Contains annotation information that is relevant to AutoML. @@ -396,15 +389,15 @@ an ancestor dataset, or the dataset that was used to train the model in use. display_name: - Output only. The value of [display\_name][google.cloud.automl. 
- v1beta1.AnnotationSpec.display\_name] when the model was + Output only. The value of [display_name][google.cloud.automl.v + 1beta1.AnnotationSpec.display_name] when the model was trained. Because this field returns a value at model training time, for different models trained using the same dataset, the returned value could be different as model owner could update the ``display_name`` between any two model training. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationPayload) - ), + }, ) _sym_db.RegisterMessage(AnnotationPayload) diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec.proto b/google/cloud/automl_v1beta1/proto/annotation_spec.proto index 483792b6..d9df07ee 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_spec.proto +++ b/google/cloud/automl_v1beta1/proto/annotation_spec.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; @@ -27,16 +27,19 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // A definition of an annotation spec. message AnnotationSpec { + option (google.api.resource) = { + type: "automl.googleapis.com/AnnotationSpec" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}" + }; + // Output only. Resource name of the annotation spec. 
// Form: // // 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' string name = 1; - // Required. - // The name of the annotation spec to show in the interface. The name can be + // Required. The name of the annotation spec to show in the interface. The name can be // up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`. - // (_), and ASCII digits 0-9. string display_name = 2; // Output only. The number of examples in the parent dataset diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py index 80ae0a8a..fe7c6669 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/annotation_spec.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 @@ -22,13 +20,12 @@ name="google/cloud/automl_v1beta1/proto/annotation_spec.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n7google/cloud/automl_v1beta1/proto/annotation_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"K\n\x0e\x41nnotationSpec\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x15\n\rexample_count\x18\t \x01(\x05\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/annotation_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto"\xd6\x01\n\x0e\x41nnotationSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x15\n\rexample_count\x18\t \x01(\x05:\x88\x01\xea\x41\x84\x01\n$automl.googleapis.com/AnnotationSpec\x12\\projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + ], ) @@ -48,7 +45,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -66,7 +63,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -97,13 +94,13 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + 
serialized_options=b"\352A\204\001\n$automl.googleapis.com/AnnotationSpec\022\\projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=118, - serialized_end=193, + serialized_start=146, + serialized_end=360, ) DESCRIPTOR.message_types_by_name["AnnotationSpec"] = _ANNOTATIONSPEC @@ -112,31 +109,31 @@ AnnotationSpec = _reflection.GeneratedProtocolMessageType( "AnnotationSpec", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATIONSPEC, - __module__="google.cloud.automl_v1beta1.proto.annotation_spec_pb2", - __doc__="""A definition of an annotation spec. + { + "DESCRIPTOR": _ANNOTATIONSPEC, + "__module__": "google.cloud.automl_v1beta1.proto.annotation_spec_pb2", + "__doc__": """A definition of an annotation spec. Attributes: name: - Output only. Resource name of the annotation spec. Form: 'pro - jects/{project\_id}/locations/{location\_id}/datasets/{dataset - \_id}/annotationSpecs/{annotation\_spec\_id}' + Output only. Resource name of the annotation spec. Form: ‘pro + jects/{project_id}/locations/{location_id}/datasets/{dataset_i + d}/annotationSpecs/{annotation_spec_id}’ display_name: Required. The name of the annotation spec to show in the interface. The name can be up to 32 characters long and must - match the regexp ``[a-zA-Z0-9_]+``. (\_), and ASCII digits - 0-9. + match the regexp ``[a-zA-Z0-9_]+``. example_count: Output only. The number of examples in the parent dataset labeled by the annotation spec. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationSpec) - ), + }, ) _sym_db.RegisterMessage(AnnotationSpec) DESCRIPTOR._options = None +_ANNOTATIONSPEC._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/classification.proto b/google/cloud/automl_v1beta1/proto/classification.proto index c8475542..0594d01e 100644 --- a/google/cloud/automl_v1beta1/proto/classification.proto +++ b/google/cloud/automl_v1beta1/proto/classification.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/temporal.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_outer_classname = "ClassificationProto"; @@ -126,10 +125,7 @@ message ClassificationEvaluationMetrics { // for each example. float false_positive_rate_at1 = 9; - // Output only. The harmonic mean of - // [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] - // and - // [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. + // Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. float f1_score_at1 = 7; // Output only. 
The number of model created labels that match a ground truth @@ -156,9 +152,7 @@ message ClassificationEvaluationMetrics { // Output only. Value of the specific cell in the confusion matrix. // The number of values each row has (i.e. the length of the row) is equal // to the length of the `annotation_spec_id` field or, if that one is not - // populated, length of the - // [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] - // field. + // populated, length of the [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field. repeated int32 example_count = 1; } diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2.py b/google/cloud/automl_v1beta1/proto/classification_pb2.py index 68651a84..7b123416 100644 --- a/google/cloud/automl_v1beta1/proto/classification_pb2.py +++ b/google/cloud/automl_v1beta1/proto/classification_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/classification.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -16,25 +13,21 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, ) +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/classification.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\023ClassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n6google/cloud/automl_v1beta1/proto/classification.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto")\n\x18\x43lassificationAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02"\xc7\x01\n\x1dVideoClassificationAnnotation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12X\n\x19\x63lassification_annotation\x18\x02 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotation\x12>\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 
\x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\023ClassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/classification.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/api/annotations.proto")\n\x18\x43lassificationAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02"\xc7\x01\n\x1dVideoClassificationAnnotation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12X\n\x19\x63lassification_annotation\x18\x02 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotation\x12>\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 \x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 
\x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -95,7 +88,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -126,7 +119,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -481,7 +474,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -558,7 +551,7 @@ ), ], extensions=[], - nested_types=[_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW], + nested_types=[_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW,], enum_types=[], serialized_options=None, is_extendable=False, @@ -609,7 +602,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\030\001"), + serialized_options=b"\030\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -757,10 +750,10 @@ ClassificationAnnotation = _reflection.GeneratedProtocolMessageType( "ClassificationAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Contains annotation details specific to classification. 
+ { + "DESCRIPTOR": _CLASSIFICATIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Contains annotation details specific to classification. Attributes: @@ -772,17 +765,17 @@ an annotation, the score is 0 for negative or 1 for positive. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationAnnotation) - ), + }, ) _sym_db.RegisterMessage(ClassificationAnnotation) VideoClassificationAnnotation = _reflection.GeneratedProtocolMessageType( "VideoClassificationAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCLASSIFICATIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Contains annotation details specific to video + { + "DESCRIPTOR": _VIDEOCLASSIFICATIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Contains annotation details specific to video classification. @@ -817,21 +810,21 @@ annotation applies. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationAnnotation) - ), + }, ) _sym_db.RegisterMessage(VideoClassificationAnnotation) ClassificationEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "ClassificationEvaluationMetrics", (_message.Message,), - dict( - ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType( + { + "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( "ConfidenceMetricsEntry", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Metrics for a single confidence threshold. + { + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Metrics for a single confidence threshold. Attributes: @@ -843,7 +836,7 @@ Output only. 
Metrics are computed with an assumption that the model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet - the confidence\_threshold. + the confidence_threshold. recall: Output only. Recall (True Positive Rate) for the given confidence threshold. @@ -867,11 +860,11 @@ label that has the highest prediction score and not below the confidence threshold for each example. f1_score_at1: - Output only. The harmonic mean of [recall\_at1][google.cloud.a - utoml.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetric - sEntry.recall\_at1] and [precision\_at1][google.cloud.automl.v - 1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry. - precision\_at1]. + Output only. The harmonic mean of [recall_at1][google.cloud.au + toml.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetrics + Entry.recall_at1] and [precision_at1][google.cloud.automl.v1be + ta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.pre + cision_at1]. true_positive_count: Output only. The number of model created labels that match a ground truth label. @@ -887,50 +880,50 @@ label. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry) - ), + }, ), - ConfusionMatrix=_reflection.GeneratedProtocolMessageType( + "ConfusionMatrix": _reflection.GeneratedProtocolMessageType( "ConfusionMatrix", (_message.Message,), - dict( - Row=_reflection.GeneratedProtocolMessageType( + { + "Row": _reflection.GeneratedProtocolMessageType( "Row", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Output only. A row in the confusion matrix. + { + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Output only. A row in the confusion matrix. 
Attributes: example_count: Output only. Value of the specific cell in the confusion - matrix. The number of values each row has (i.e. the length of + matrix. The number of values each row has (i.e. the length of the row) is equal to the length of the ``annotation_spec_id`` field or, if that one is not populated, length of the [display - \_name][google.cloud.automl.v1beta1.ClassificationEvaluationMe - trics.ConfusionMatrix.display\_name] field. + _name][google.cloud.automl.v1beta1.ClassificationEvaluationMet + rics.ConfusionMatrix.display_name] field. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row) - ), + }, ), - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Confusion matrix of the model running the classification. + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Confusion matrix of the model running the classification. Attributes: annotation_spec_id: Output only. IDs of the annotation specs used in the confusion - matrix. For Tables CLASSIFICATION [prediction\_type][google.c - loud.automl.v1beta1.TablesModelMetadata.prediction\_type] only - list of [annotation\_spec\_display\_name-s][] is populated. + matrix. For Tables CLASSIFICATION [prediction_type][google.cl + oud.automl.v1beta1.TablesModelMetadata.prediction_type] only + list of [annotation_spec_display_name-s][] is populated. display_name: Output only. Display name of the annotation specs used in the confusion matrix, as they were at the moment of the - evaluation. For Tables CLASSIFICATION [prediction\_type-s][go - ogle.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type - ], distinct values of the target column at the moment of the + evaluation. 
For Tables CLASSIFICATION [prediction_type-s][goo + gle.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], + distinct values of the target column at the moment of the model evaluation are populated here. row: Output only. Rows in the confusion matrix. The number of rows @@ -941,13 +934,13 @@ evaluated. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix) - ), + }, ), - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Model evaluation metrics for classification problems. + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Model evaluation metrics for classification problems. Note: For Video Classification this metrics only describe quality of the - Video Classification predictions of "segment\_classification" type. + Video Classification predictions of “segment_classification” type. Attributes: @@ -964,12 +957,12 @@ log_loss: Output only. The Log Loss metric. confidence_metrics_entry: - Output only. Metrics for each confidence\_threshold in - 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and - position\_threshold = INT32\_MAX\_VALUE. ROC and precision- - recall curves, and other aggregated metrics are derived from - them. The confidence metrics entries may also be supplied for - additional values of position\_threshold, but from these no + Output only. Metrics for each confidence_threshold in + 0.00,0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and + position_threshold = INT32_MAX_VALUE. ROC and precision-recall + curves, and other aggregated metrics are derived from them. + The confidence metrics entries may also be supplied for + additional values of position_threshold, but from these no aggregated metrics are computed. confusion_matrix: Output only. Confusion matrix of the evaluation. Only set for @@ -980,7 +973,7 @@ Output only. 
The annotation spec ids used for this evaluation. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(ClassificationEvaluationMetrics) _sym_db.RegisterMessage(ClassificationEvaluationMetrics.ConfidenceMetricsEntry) diff --git a/google/cloud/automl_v1beta1/proto/column_spec.proto b/google/cloud/automl_v1beta1/proto/column_spec.proto index b8f437f7..03389b8a 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec.proto +++ b/google/cloud/automl_v1beta1/proto/column_spec.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/data_stats.proto"; import "google/cloud/automl/v1beta1/data_types.proto"; import "google/api/annotations.proto"; @@ -32,6 +32,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Used by: // * Tables message ColumnSpec { + option (google.api.resource) = { + type: "automl.googleapis.com/ColumnSpec" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}" + }; + // Identifies the table's column, and its correlation with the column this // ColumnSpec describes. 
message CorrelatedColumn { diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py index 844bc058..d4c075f2 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/column_spec.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( data_stats_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2, ) @@ -28,13 +26,10 @@ name="google/cloud/automl_v1beta1/proto/column_spec.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n3google/cloud/automl_v1beta1/proto/column_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_types.proto\x1a\x1cgoogle/api/annotations.proto"\x84\x03\n\nColumnSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\tdata_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\ndata_stats\x18\x04 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats\x12X\n\x16top_correlated_columns\x18\x05 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn\x12\x0c\n\x04\x65tag\x18\x06 
\x01(\t\x1at\n\x10\x43orrelatedColumn\x12\x16\n\x0e\x63olumn_spec_id\x18\x01 \x01(\t\x12H\n\x11\x63orrelation_stats\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStatsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/column_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_types.proto\x1a\x1cgoogle/api/annotations.proto"\x9b\x04\n\nColumnSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\tdata_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\ndata_stats\x18\x04 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats\x12X\n\x16top_correlated_columns\x18\x05 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t\x1at\n\x10\x43orrelatedColumn\x12\x16\n\x0e\x63olumn_spec_id\x18\x01 \x01(\t\x12H\n\x11\x63orrelation_stats\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x94\x01\xea\x41\x90\x01\n automl.googleapis.com/ColumnSpec\x12lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__types__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -58,7 +53,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -94,8 +89,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=491, - serialized_end=607, + serialized_start=518, + serialized_end=634, ) _COLUMNSPEC = _descriptor.Descriptor( @@ -114,7 +109,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -150,7 +145,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -204,7 +199,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -215,15 +210,15 @@ ), ], extensions=[], - nested_types=[_COLUMNSPEC_CORRELATEDCOLUMN], + nested_types=[_COLUMNSPEC_CORRELATEDCOLUMN,], enum_types=[], - serialized_options=None, + serialized_options=b"\352A\220\001\n automl.googleapis.com/ColumnSpec\022lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=219, - serialized_end=607, + serialized_start=246, + serialized_end=785, ) _COLUMNSPEC_CORRELATEDCOLUMN.fields_by_name[ @@ -251,30 +246,30 @@ ColumnSpec = _reflection.GeneratedProtocolMessageType( "ColumnSpec", (_message.Message,), - dict( - CorrelatedColumn=_reflection.GeneratedProtocolMessageType( + { + "CorrelatedColumn": 
_reflection.GeneratedProtocolMessageType( "CorrelatedColumn", (_message.Message,), - dict( - DESCRIPTOR=_COLUMNSPEC_CORRELATEDCOLUMN, - __module__="google.cloud.automl_v1beta1.proto.column_spec_pb2", - __doc__="""Identifies the table's column, and its correlation with + { + "DESCRIPTOR": _COLUMNSPEC_CORRELATEDCOLUMN, + "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", + "__doc__": """Identifies the table’s column, and its correlation with the column this ColumnSpec describes. Attributes: column_spec_id: - The column\_spec\_id of the correlated column, which belongs - to the same table as the in-context column. + The column_spec_id of the correlated column, which belongs to + the same table as the in-context column. correlation_stats: Correlation between this and the in-context column. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn) - ), + }, ), - DESCRIPTOR=_COLUMNSPEC, - __module__="google.cloud.automl_v1beta1.proto.column_spec_pb2", - __doc__="""A representation of a column in a relational table. When + "DESCRIPTOR": _COLUMNSPEC, + "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", + "__doc__": """A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were given on import . Used by: \* Tables @@ -290,25 +285,26 @@ Output only. The name of the column to show in the interface. The name can be up to 100 characters long and can consist only of ASCII Latin letters A-Z and a-z, ASCII digits 0-9, - underscores(\_), and forward slashes(/), and must start with a + underscores(_), and forward slashes(/), and must start with a letter or a digit. data_stats: Output only. Stats of the series of values in the column. This - field may be stale, see the ancestor's - Dataset.tables\_dataset\_metadata.stats\_update\_time field - for the timestamp at which these stats were last updated. 
+ field may be stale, see the ancestor’s + Dataset.tables_dataset_metadata.stats_update_time field for + the timestamp at which these stats were last updated. top_correlated_columns: Deprecated. etag: Used to perform consistent read-modify-write updates. If not - set, a blind "overwrite" update happens. + set, a blind “overwrite” update happens. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ColumnSpec) - ), + }, ) _sym_db.RegisterMessage(ColumnSpec) _sym_db.RegisterMessage(ColumnSpec.CorrelatedColumn) DESCRIPTOR._options = None +_COLUMNSPEC._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/data_items.proto b/google/cloud/automl_v1beta1/proto/data_items.proto index 424a0c64..9b9187ad 100644 --- a/google/cloud/automl_v1beta1/proto/data_items.proto +++ b/google/cloud/automl_v1beta1/proto/data_items.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,19 +11,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/geometry.proto"; import "google/cloud/automl/v1beta1/io.proto"; +import "google/cloud/automl/v1beta1/temporal.proto"; import "google/cloud/automl/v1beta1/text_segment.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -35,11 +35,9 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Only images up to 30MB in size are supported. message Image { // Input only. The data representing the image. - // For Predict calls - // [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, - // as other options are not currently supported by prediction API. You can - // read the contents of an uploaded image by using the - // [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field. + // For Predict calls [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, as other options are not + // currently supported by prediction API. You can read the contents of an + // uploaded image by using the [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field. oneof data { // Image content represented as a stream of bytes. // Note: As with all `bytes` fields, protobuffers use a pure binary @@ -60,11 +58,9 @@ message TextSnippet { // characters long. string content = 1; - // Optional. The format of - // [content][google.cloud.automl.v1beta1.TextSnippet.content]. Currently the - // only two allowed values are "text/html" and "text/plain". If left blank, - // the format is automatically determined from the type of the uploaded - // [content][google.cloud.automl.v1beta1.TextSnippet.content]. + // Optional. 
The format of [content][google.cloud.automl.v1beta1.TextSnippet.content]. Currently the only two allowed + // values are "text/html" and "text/plain". If left blank, the format is + // automatically determined from the type of the uploaded [content][google.cloud.automl.v1beta1.TextSnippet.content]. string mime_type = 2; // Output only. HTTP URI where you can download the content. @@ -100,9 +96,7 @@ message DocumentDimensions { // A structured text document e.g. a PDF. message Document { - // Describes the layout information of a - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in - // the document. + // Describes the layout information of a [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the document. message Layout { // The type of TextSegment in the context of the original document. enum TextSegmentType { @@ -151,14 +145,12 @@ message Document { // [document_text][google.cloud.automl.v1beta1.Document.document_text]. TextSegment text_segment = 1; - // Page number of the - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - // in the original document, starts from 1. + // Page number of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the original document, starts + // from 1. int32 page_number = 2; - // The position of the - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - // in the page. Contains exactly 4 + // The position of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the page. + // Contains exactly 4 // // [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices] // and they are connected by edges in the order provided, which will @@ -168,9 +160,7 @@ message Document { // Coordinates are based on top-left as point (0,0). 
BoundingPoly bounding_poly = 3; - // The type of the - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - // in document. + // The type of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in document. TextSegmentType text_segment_type = 4; } diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py index c76bcf28..ff6524d8 100644 --- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_items_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/data_items.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,39 +12,39 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( geometry_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2, ) from google.cloud.automl_v1beta1.proto import ( io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, ) +from google.cloud.automl_v1beta1.proto import ( + temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, +) from google.cloud.automl_v1beta1.proto import ( text_segment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2, ) from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( 
name="google/cloud/automl_v1beta1/proto/data_items.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/data_items.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto"\x7f\n\x05Image\x12\x15\n\x0bimage_bytes\x18\x01 \x01(\x0cH\x00\x12@\n\x0cinput_config\x18\x06 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigH\x00\x12\x15\n\rthumbnail_uri\x18\x04 \x01(\tB\x06\n\x04\x64\x61ta"F\n\x0bTextSnippet\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x12\x13\n\x0b\x63ontent_uri\x18\x04 \x01(\t"\xef\x01\n\x12\x44ocumentDimensions\x12S\n\x04unit\x18\x01 \x01(\x0e\x32\x45.google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit\x12\r\n\x05width\x18\x02 \x01(\x02\x12\x0e\n\x06height\x18\x03 \x01(\x02"e\n\x15\x44ocumentDimensionUnit\x12\'\n#DOCUMENT_DIMENSION_UNIT_UNSPECIFIED\x10\x00\x12\x08\n\x04INCH\x10\x01\x12\x0e\n\nCENTIMETER\x10\x02\x12\t\n\x05POINT\x10\x03"\xf9\x05\n\x08\x44ocument\x12\x46\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.DocumentInputConfig\x12?\n\rdocument_text\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippet\x12<\n\x06layout\x18\x03 \x03(\x0b\x32,.google.cloud.automl.v1beta1.Document.Layout\x12L\n\x13\x64ocument_dimensions\x18\x04 \x01(\x0b\x32/.google.cloud.automl.v1beta1.DocumentDimensions\x12\x12\n\npage_count\x18\x05 \x01(\x05\x1a\xc3\x03\n\x06Layout\x12>\n\x0ctext_segment\x18\x01 
\x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegment\x12\x13\n\x0bpage_number\x18\x02 \x01(\x05\x12@\n\rbounding_poly\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12W\n\x11text_segment_type\x18\x04 \x01(\x0e\x32<.google.cloud.automl.v1beta1.Document.Layout.TextSegmentType"\xc8\x01\n\x0fTextSegmentType\x12!\n\x1dTEXT_SEGMENT_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05TOKEN\x10\x01\x12\r\n\tPARAGRAPH\x10\x02\x12\x0e\n\nFORM_FIELD\x10\x03\x12\x13\n\x0f\x46ORM_FIELD_NAME\x10\x04\x12\x17\n\x13\x46ORM_FIELD_CONTENTS\x10\x05\x12\t\n\x05TABLE\x10\x06\x12\x10\n\x0cTABLE_HEADER\x10\x07\x12\r\n\tTABLE_ROW\x10\x08\x12\x0e\n\nTABLE_CELL\x10\t"F\n\x03Row\x12\x17\n\x0f\x63olumn_spec_ids\x18\x02 \x03(\t\x12&\n\x06values\x18\x03 \x03(\x0b\x32\x16.google.protobuf.Value"\xfe\x01\n\x0e\x45xamplePayload\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32".google.cloud.automl.v1beta1.ImageH\x00\x12@\n\x0ctext_snippet\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetH\x00\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DocumentH\x00\x12/\n\x03row\x18\x03 \x01(\x0b\x32 .google.cloud.automl.v1beta1.RowH\x00\x42\t\n\x07payloadB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_items.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\x7f\n\x05Image\x12\x15\n\x0bimage_bytes\x18\x01 \x01(\x0cH\x00\x12@\n\x0cinput_config\x18\x06 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigH\x00\x12\x15\n\rthumbnail_uri\x18\x04 \x01(\tB\x06\n\x04\x64\x61ta"F\n\x0bTextSnippet\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x12\x13\n\x0b\x63ontent_uri\x18\x04 \x01(\t"\xef\x01\n\x12\x44ocumentDimensions\x12S\n\x04unit\x18\x01 \x01(\x0e\x32\x45.google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit\x12\r\n\x05width\x18\x02 \x01(\x02\x12\x0e\n\x06height\x18\x03 \x01(\x02"e\n\x15\x44ocumentDimensionUnit\x12\'\n#DOCUMENT_DIMENSION_UNIT_UNSPECIFIED\x10\x00\x12\x08\n\x04INCH\x10\x01\x12\x0e\n\nCENTIMETER\x10\x02\x12\t\n\x05POINT\x10\x03"\xf9\x05\n\x08\x44ocument\x12\x46\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.DocumentInputConfig\x12?\n\rdocument_text\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippet\x12<\n\x06layout\x18\x03 \x03(\x0b\x32,.google.cloud.automl.v1beta1.Document.Layout\x12L\n\x13\x64ocument_dimensions\x18\x04 \x01(\x0b\x32/.google.cloud.automl.v1beta1.DocumentDimensions\x12\x12\n\npage_count\x18\x05 \x01(\x05\x1a\xc3\x03\n\x06Layout\x12>\n\x0ctext_segment\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegment\x12\x13\n\x0bpage_number\x18\x02 \x01(\x05\x12@\n\rbounding_poly\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12W\n\x11text_segment_type\x18\x04 
\x01(\x0e\x32<.google.cloud.automl.v1beta1.Document.Layout.TextSegmentType"\xc8\x01\n\x0fTextSegmentType\x12!\n\x1dTEXT_SEGMENT_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05TOKEN\x10\x01\x12\r\n\tPARAGRAPH\x10\x02\x12\x0e\n\nFORM_FIELD\x10\x03\x12\x13\n\x0f\x46ORM_FIELD_NAME\x10\x04\x12\x17\n\x13\x46ORM_FIELD_CONTENTS\x10\x05\x12\t\n\x05TABLE\x10\x06\x12\x10\n\x0cTABLE_HEADER\x10\x07\x12\r\n\tTABLE_ROW\x10\x08\x12\x0e\n\nTABLE_CELL\x10\t"F\n\x03Row\x12\x17\n\x0f\x63olumn_spec_ids\x18\x02 \x03(\t\x12&\n\x06values\x18\x03 \x03(\x0b\x32\x16.google.protobuf.Value"\xfe\x01\n\x0e\x45xamplePayload\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32".google.cloud.automl.v1beta1.ImageH\x00\x12@\n\x0ctext_snippet\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetH\x00\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DocumentH\x00\x12/\n\x03row\x18\x03 \x01(\x0b\x32 .google.cloud.automl.v1beta1.RowH\x00\x42\t\n\x07payloadB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, google_dot_protobuf_dot_any__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -77,8 +74,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=690, - serialized_end=791, + serialized_start=740, + serialized_end=841, ) _sym_db.RegisterEnumDescriptor(_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT) @@ -133,8 +130,8 @@ ], 
containing_type=None, serialized_options=None, - serialized_start=1355, - serialized_end=1555, + serialized_start=1405, + serialized_end=1605, ) _sym_db.RegisterEnumDescriptor(_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE) @@ -155,7 +152,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -191,7 +188,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -215,10 +212,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=350, - serialized_end=477, + serialized_start=400, + serialized_end=527, ) @@ -238,7 +235,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -256,7 +253,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -274,7 +271,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -292,8 +289,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=479, - serialized_end=549, + serialized_start=529, + serialized_end=599, ) @@ -361,14 +358,14 @@ ], extensions=[], nested_types=[], - enum_types=[_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT], + enum_types=[_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT,], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=552, - serialized_end=791, + serialized_start=602, + serialized_end=841, ) @@ -454,14 +451,14 @@ ], extensions=[], nested_types=[], - enum_types=[_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE], + 
enum_types=[_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE,], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1104, - serialized_end=1555, + serialized_start=1154, + serialized_end=1605, ) _DOCUMENT = _descriptor.Descriptor( @@ -563,15 +560,15 @@ ), ], extensions=[], - nested_types=[_DOCUMENT_LAYOUT], + nested_types=[_DOCUMENT_LAYOUT,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=794, - serialized_end=1555, + serialized_start=844, + serialized_end=1605, ) @@ -627,8 +624,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1557, - serialized_end=1627, + serialized_start=1607, + serialized_end=1677, ) @@ -726,10 +723,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=1630, - serialized_end=1884, + serialized_start=1680, + serialized_end=1934, ) _IMAGE.fields_by_name[ @@ -806,21 +803,21 @@ Image = _reflection.GeneratedProtocolMessageType( "Image", (_message.Message,), - dict( - DESCRIPTOR=_IMAGE, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A representation of an image. Only images up to 30MB in + { + "DESCRIPTOR": _IMAGE, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A representation of an image. Only images up to 30MB in size are supported. Attributes: data: Input only. The data representing the image. For Predict calls - [image\_bytes][google.cloud.automl.v1beta1.Image.image\_bytes] + [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, as other options are not currently supported by prediction API. You can read the contents of an uploaded image by using the - [content\_uri][google.cloud.automl.v1beta1.Image.content\_uri] + [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field. image_bytes: Image content represented as a stream of bytes. 
Note: As with @@ -832,17 +829,17 @@ Output only. HTTP URI to the thumbnail image. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Image) - ), + }, ) _sym_db.RegisterMessage(Image) TextSnippet = _reflection.GeneratedProtocolMessageType( "TextSnippet", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSNIPPET, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A representation of a text snippet. + { + "DESCRIPTOR": _TEXTSNIPPET, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A representation of a text snippet. Attributes: @@ -852,25 +849,25 @@ mime_type: Optional. The format of [content][google.cloud.automl.v1beta1.TextSnippet.content]. - Currently the only two allowed values are "text/html" and - "text/plain". If left blank, the format is automatically + Currently the only two allowed values are “text/html” and + “text/plain”. If left blank, the format is automatically determined from the type of the uploaded [content][google.cloud.automl.v1beta1.TextSnippet.content]. content_uri: Output only. HTTP URI where you can download the content. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSnippet) - ), + }, ) _sym_db.RegisterMessage(TextSnippet) DocumentDimensions = _reflection.GeneratedProtocolMessageType( "DocumentDimensions", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTDIMENSIONS, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""Message that describes dimension of a document. + { + "DESCRIPTOR": _DOCUMENTDIMENSIONS, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """Message that describes dimension of a document. Attributes: @@ -882,52 +879,52 @@ Height value of the document, works together with the unit. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DocumentDimensions) - ), + }, ) _sym_db.RegisterMessage(DocumentDimensions) Document = _reflection.GeneratedProtocolMessageType( "Document", (_message.Message,), - dict( - Layout=_reflection.GeneratedProtocolMessageType( + { + "Layout": _reflection.GeneratedProtocolMessageType( "Layout", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENT_LAYOUT, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""Describes the layout information of a - [text\_segment][google.cloud.automl.v1beta1.Document.Layout.text\_segment] + { + "DESCRIPTOR": _DOCUMENT_LAYOUT, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """Describes the layout information of a + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the document. Attributes: text_segment: - Text Segment that represents a segment in [document\_text][goo - gle.cloud.automl.v1beta1.Document.document\_text]. + Text Segment that represents a segment in [document_text][goog + le.cloud.automl.v1beta1.Document.document_text]. page_number: - Page number of the [text\_segment][google.cloud.automl.v1beta1 - .Document.Layout.text\_segment] in the original document, - starts from 1. + Page number of the [text_segment][google.cloud.automl.v1beta1. + Document.Layout.text_segment] in the original document, starts + from 1. bounding_poly: - The position of the [text\_segment][google.cloud.automl.v1beta - 1.Document.Layout.text\_segment] in the page. Contains exactly - 4 [normalized\_vertices][google.cloud.automl.v1beta1.Bounding - Poly.normalized\_vertices] and they are connected by edges in - the order provided, which will represent a rectangle parallel - to the frame. The [NormalizedVertex-s][google.cloud.automl.v1b - eta1.NormalizedVertex] are relative to the page. Coordinates - are based on top-left as point (0,0). 
+ The position of the [text_segment][google.cloud.automl.v1beta1 + .Document.Layout.text_segment] in the page. Contains exactly 4 + [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly + .normalized_vertices] and they are connected by edges in the + order provided, which will represent a rectangle parallel to + the frame. The [NormalizedVertex-s][google.cloud.automl.v1beta + 1.NormalizedVertex] are relative to the page. Coordinates are + based on top-left as point (0,0). text_segment_type: - The type of the [text\_segment][google.cloud.automl.v1beta1.Do - cument.Layout.text\_segment] in document. + The type of the [text_segment][google.cloud.automl.v1beta1.Doc + ument.Layout.text_segment] in document. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Document.Layout) - ), + }, ), - DESCRIPTOR=_DOCUMENT, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A structured text document e.g. a PDF. + "DESCRIPTOR": _DOCUMENT, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A structured text document e.g. a PDF. Attributes: @@ -937,14 +934,14 @@ The plain text version of this document. layout: Describes the layout of the document. Sorted by - [page\_number][]. + [page_number][]. document_dimensions: The dimensions of the page in the document. page_count: Number of pages in the document. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Document) - ), + }, ) _sym_db.RegisterMessage(Document) _sym_db.RegisterMessage(Document.Layout) @@ -952,40 +949,40 @@ Row = _reflection.GeneratedProtocolMessageType( "Row", (_message.Message,), - dict( - DESCRIPTOR=_ROW, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A representation of a row in a relational table. + { + "DESCRIPTOR": _ROW, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A representation of a row in a relational table. 
Attributes: column_spec_ids: The resource IDs of the column specs describing the columns of the row. If set must contain, but possibly in a different - order, all input feature [column\_spec\_ids][google.cloud.aut - oml.v1beta1.TablesModelMetadata.input\_feature\_column\_specs] - of the Model this row is being passed to. Note: The below + order, all input feature [column_spec_ids][google.cloud.autom + l.v1beta1.TablesModelMetadata.input_feature_column_specs] of + the Model this row is being passed to. Note: The below ``values`` field must match order of this field, if this field is set. values: Required. The values of the row cells, given in the same order - as the column\_spec\_ids, or, if not set, then in the same - order as input feature [column\_specs][google.cloud.automl.v1 - beta1.TablesModelMetadata.input\_feature\_column\_specs] of - the Model this row is being passed to. + as the column_spec_ids, or, if not set, then in the same order + as input feature [column_specs][google.cloud.automl.v1beta1.T + ablesModelMetadata.input_feature_column_specs] of the Model + this row is being passed to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Row) - ), + }, ) _sym_db.RegisterMessage(Row) ExamplePayload = _reflection.GeneratedProtocolMessageType( "ExamplePayload", (_message.Message,), - dict( - DESCRIPTOR=_EXAMPLEPAYLOAD, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""Example data used for training or prediction. + { + "DESCRIPTOR": _EXAMPLEPAYLOAD, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """Example data used for training or prediction. Attributes: @@ -1001,7 +998,7 @@ Example relational table row. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExamplePayload) - ), + }, ) _sym_db.RegisterMessage(ExamplePayload) diff --git a/google/cloud/automl_v1beta1/proto/data_stats.proto b/google/cloud/automl_v1beta1/proto/data_stats.proto index 5d941a5b..c13a5d45 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats.proto +++ b/google/cloud/automl_v1beta1/proto/data_stats.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py index 85f18cee..e6f9535c 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/data_stats.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,9 @@ name="google/cloud/automl_v1beta1/proto/data_stats.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/data_stats.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfd\x03\n\tDataStats\x12\x42\n\rfloat64_stats\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.Float64StatsH\x00\x12@\n\x0cstring_stats\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StringStatsH\x00\x12\x46\n\x0ftimestamp_stats\x18\x05 \x01(\x0b\x32+.google.cloud.automl.v1beta1.TimestampStatsH\x00\x12>\n\x0b\x61rray_stats\x18\x06 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ArrayStatsH\x00\x12@\n\x0cstruct_stats\x18\x07 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StructStatsH\x00\x12\x44\n\x0e\x63\x61tegory_stats\x18\x08 \x01(\x0b\x32*.google.cloud.automl.v1beta1.CategoryStatsH\x00\x12\x1c\n\x14\x64istinct_value_count\x18\x01 \x01(\x03\x12\x18\n\x10null_value_count\x18\x02 \x01(\x03\x12\x19\n\x11valid_value_count\x18\t \x01(\x03\x42\x07\n\x05stats"\xdd\x01\n\x0c\x46loat64Stats\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x1a\n\x12standard_deviation\x18\x02 \x01(\x01\x12\x11\n\tquantiles\x18\x03 \x03(\x01\x12T\n\x11histogram_buckets\x18\x04 \x03(\x0b\x32\x39.google.cloud.automl.v1beta1.Float64Stats.HistogramBucket\x1a:\n\x0fHistogramBucket\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\r\n\x05\x63ount\x18\x03 
\x01(\x03"\x8d\x01\n\x0bStringStats\x12P\n\x11top_unigram_stats\x18\x01 \x03(\x0b\x32\x35.google.cloud.automl.v1beta1.StringStats.UnigramStats\x1a,\n\x0cUnigramStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"\xf4\x02\n\x0eTimestampStats\x12V\n\x0egranular_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry\x1a\x98\x01\n\rGranularStats\x12W\n\x07\x62uckets\x18\x01 \x03(\x0b\x32\x46.google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry\x1a.\n\x0c\x42ucketsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1ao\n\x12GranularStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12H\n\x05value\x18\x02 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TimestampStats.GranularStats:\x02\x38\x01"J\n\nArrayStats\x12<\n\x0cmember_stats\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats"\xb7\x01\n\x0bStructStats\x12M\n\x0b\x66ield_stats\x18\x01 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.StructStats.FieldStatsEntry\x1aY\n\x0f\x46ieldStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats:\x02\x38\x01"\xa0\x01\n\rCategoryStats\x12Z\n\x12top_category_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats\x1a\x33\n\x13SingleCategoryStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"%\n\x10\x43orrelationStats\x12\x11\n\tcramers_v\x18\x01 \x01(\x01\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + 
serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_stats.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfd\x03\n\tDataStats\x12\x42\n\rfloat64_stats\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.Float64StatsH\x00\x12@\n\x0cstring_stats\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StringStatsH\x00\x12\x46\n\x0ftimestamp_stats\x18\x05 \x01(\x0b\x32+.google.cloud.automl.v1beta1.TimestampStatsH\x00\x12>\n\x0b\x61rray_stats\x18\x06 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ArrayStatsH\x00\x12@\n\x0cstruct_stats\x18\x07 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StructStatsH\x00\x12\x44\n\x0e\x63\x61tegory_stats\x18\x08 \x01(\x0b\x32*.google.cloud.automl.v1beta1.CategoryStatsH\x00\x12\x1c\n\x14\x64istinct_value_count\x18\x01 \x01(\x03\x12\x18\n\x10null_value_count\x18\x02 \x01(\x03\x12\x19\n\x11valid_value_count\x18\t \x01(\x03\x42\x07\n\x05stats"\xdd\x01\n\x0c\x46loat64Stats\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x1a\n\x12standard_deviation\x18\x02 \x01(\x01\x12\x11\n\tquantiles\x18\x03 \x03(\x01\x12T\n\x11histogram_buckets\x18\x04 \x03(\x0b\x32\x39.google.cloud.automl.v1beta1.Float64Stats.HistogramBucket\x1a:\n\x0fHistogramBucket\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\r\n\x05\x63ount\x18\x03 \x01(\x03"\x8d\x01\n\x0bStringStats\x12P\n\x11top_unigram_stats\x18\x01 \x03(\x0b\x32\x35.google.cloud.automl.v1beta1.StringStats.UnigramStats\x1a,\n\x0cUnigramStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"\xf4\x02\n\x0eTimestampStats\x12V\n\x0egranular_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry\x1a\x98\x01\n\rGranularStats\x12W\n\x07\x62uckets\x18\x01 
\x03(\x0b\x32\x46.google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry\x1a.\n\x0c\x42ucketsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1ao\n\x12GranularStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12H\n\x05value\x18\x02 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TimestampStats.GranularStats:\x02\x38\x01"J\n\nArrayStats\x12<\n\x0cmember_stats\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats"\xb7\x01\n\x0bStructStats\x12M\n\x0b\x66ield_stats\x18\x01 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.StructStats.FieldStatsEntry\x1aY\n\x0f\x46ieldStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats:\x02\x38\x01"\xa0\x01\n\rCategoryStats\x12Z\n\x12top_category_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats\x1a\x33\n\x13SingleCategoryStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"%\n\x10\x43orrelationStats\x12\x11\n\tcramers_v\x18\x01 \x01(\x01\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -216,7 +209,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=114, serialized_end=623, @@ -378,7 +371,7 @@ ), ], extensions=[], - nested_types=[_FLOAT64STATS_HISTOGRAMBUCKET], + nested_types=[_FLOAT64STATS_HISTOGRAMBUCKET,], enum_types=[], serialized_options=None, is_extendable=False, @@ -406,7 +399,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -470,10 +463,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - 
nested_types=[_STRINGSTATS_UNIGRAMSTATS], + nested_types=[_STRINGSTATS_UNIGRAMSTATS,], enum_types=[], serialized_options=None, is_extendable=False, @@ -532,7 +525,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -565,10 +558,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY], + nested_types=[_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -595,7 +588,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -626,7 +619,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -659,10 +652,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_TIMESTAMPSTATS_GRANULARSTATS, _TIMESTAMPSTATS_GRANULARSTATSENTRY], + nested_types=[_TIMESTAMPSTATS_GRANULARSTATS, _TIMESTAMPSTATS_GRANULARSTATSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -698,7 +691,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -729,7 +722,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -760,7 +753,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -793,10 +786,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], 
extensions=[], - nested_types=[_STRUCTSTATS_FIELDSTATSENTRY], + nested_types=[_STRUCTSTATS_FIELDSTATSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -824,7 +817,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -888,10 +881,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_CATEGORYSTATS_SINGLECATEGORYSTATS], + nested_types=[_CATEGORYSTATS_SINGLECATEGORYSTATS,], enum_types=[], serialized_options=None, is_extendable=False, @@ -927,7 +920,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1026,10 +1019,10 @@ DataStats = _reflection.GeneratedProtocolMessageType( "DataStats", (_message.Message,), - dict( - DESCRIPTOR=_DATASTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of values that share the + { + "DESCRIPTOR": _DATASTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of values that share the same DataType. @@ -1056,21 +1049,21 @@ The number of values that are valid. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DataStats) - ), + }, ) _sym_db.RegisterMessage(DataStats) Float64Stats = _reflection.GeneratedProtocolMessageType( "Float64Stats", (_message.Message,), - dict( - HistogramBucket=_reflection.GeneratedProtocolMessageType( + { + "HistogramBucket": _reflection.GeneratedProtocolMessageType( "HistogramBucket", (_message.Message,), - dict( - DESCRIPTOR=_FLOAT64STATS_HISTOGRAMBUCKET, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""A bucket of a histogram. 
+ { + "DESCRIPTOR": _FLOAT64STATS_HISTOGRAMBUCKET, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """A bucket of a histogram. Attributes: @@ -1078,17 +1071,17 @@ The minimum value of the bucket, inclusive. max: The maximum value of the bucket, exclusive unless max = - ``"Infinity"``, in which case it's inclusive. + ``"Infinity"``, in which case it’s inclusive. count: - The number of data values that are in the bucket, i.e. are + The number of data values that are in the bucket, i.e. are between min and max values. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Float64Stats.HistogramBucket) - ), + }, ), - DESCRIPTOR=_FLOAT64STATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of FLOAT64 values. + "DESCRIPTOR": _FLOAT64STATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of FLOAT64 values. Attributes: @@ -1098,7 +1091,7 @@ The standard deviation of the series. quantiles: Ordered from 0 to k k-quantile values of the data series of n - values. The value at index i is, approximately, the i\*n/k-th + values. The value at index i is, approximately, the i*n/k-th smallest value in the series; for i = 0 and i = k these are, respectively, the min and max values. histogram_buckets: @@ -1110,7 +1103,7 @@ ``"Infinity"``. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Float64Stats) - ), + }, ) _sym_db.RegisterMessage(Float64Stats) _sym_db.RegisterMessage(Float64Stats.HistogramBucket) @@ -1118,14 +1111,14 @@ StringStats = _reflection.GeneratedProtocolMessageType( "StringStats", (_message.Message,), - dict( - UnigramStats=_reflection.GeneratedProtocolMessageType( + { + "UnigramStats": _reflection.GeneratedProtocolMessageType( "UnigramStats", (_message.Message,), - dict( - DESCRIPTOR=_STRINGSTATS_UNIGRAMSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The statistics of a unigram. + { + "DESCRIPTOR": _STRINGSTATS_UNIGRAMSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The statistics of a unigram. Attributes: @@ -1135,11 +1128,11 @@ The number of occurrences of this unigram in the series. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StringStats.UnigramStats) - ), + }, ), - DESCRIPTOR=_STRINGSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of STRING values. + "DESCRIPTOR": _STRINGSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of STRING values. Attributes: @@ -1148,7 +1141,7 @@ le.cloud.automl.v1beta1.StringStats.UnigramStats.count]. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StringStats) - ), + }, ) _sym_db.RegisterMessage(StringStats) _sym_db.RegisterMessage(StringStats.UnigramStats) @@ -1156,58 +1149,58 @@ TimestampStats = _reflection.GeneratedProtocolMessageType( "TimestampStats", (_message.Message,), - dict( - GranularStats=_reflection.GeneratedProtocolMessageType( + { + "GranularStats": _reflection.GeneratedProtocolMessageType( "GranularStats", (_message.Message,), - dict( - BucketsEntry=_reflection.GeneratedProtocolMessageType( + { + "BucketsEntry": _reflection.GeneratedProtocolMessageType( "BucketsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2" + { + "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry) - ), + }, ), - DESCRIPTOR=_TIMESTAMPSTATS_GRANULARSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""Stats split by a defined in context granularity. + "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """Stats split by a defined in context granularity. Attributes: buckets: A map from granularity key to example count for that key. E.g. - for hour\_of\_day ``13`` means 1pm, or for month\_of\_year - ``5`` means May). + for hour_of_day ``13`` means 1pm, or for month_of_year ``5`` + means May). 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStats) - ), + }, ), - GranularStatsEntry=_reflection.GeneratedProtocolMessageType( + "GranularStatsEntry": _reflection.GeneratedProtocolMessageType( "GranularStatsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPSTATS_GRANULARSTATSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2" + { + "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry) - ), + }, ), - DESCRIPTOR=_TIMESTAMPSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of TIMESTAMP values. + "DESCRIPTOR": _TIMESTAMPSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of TIMESTAMP values. Attributes: granular_stats: The string key is the pre-defined granularity. Currently - supported: hour\_of\_day, day\_of\_week, month\_of\_year. + supported: hour_of_day, day_of_week, month_of_year. Granularities finer that the granularity of timestamp data are - not populated (e.g. if timestamps are at day granularity, then - hour\_of\_day is not populated). + not populated (e.g. if timestamps are at day granularity, then + hour_of_day is not populated). """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats) - ), + }, ) _sym_db.RegisterMessage(TimestampStats) _sym_db.RegisterMessage(TimestampStats.GranularStats) @@ -1217,10 +1210,10 @@ ArrayStats = _reflection.GeneratedProtocolMessageType( "ArrayStats", (_message.Message,), - dict( - DESCRIPTOR=_ARRAYSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of ARRAY values. 
+ { + "DESCRIPTOR": _ARRAYSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of ARRAY values. Attributes: @@ -1230,26 +1223,26 @@ type of the array. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ArrayStats) - ), + }, ) _sym_db.RegisterMessage(ArrayStats) StructStats = _reflection.GeneratedProtocolMessageType( "StructStats", (_message.Message,), - dict( - FieldStatsEntry=_reflection.GeneratedProtocolMessageType( + { + "FieldStatsEntry": _reflection.GeneratedProtocolMessageType( "FieldStatsEntry", (_message.Message,), - dict( - DESCRIPTOR=_STRUCTSTATS_FIELDSTATSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2" + { + "DESCRIPTOR": _STRUCTSTATS_FIELDSTATSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructStats.FieldStatsEntry) - ), + }, ), - DESCRIPTOR=_STRUCTSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of STRUCT values. + "DESCRIPTOR": _STRUCTSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of STRUCT values. Attributes: @@ -1258,7 +1251,7 @@ over series of all data in that field across all the structs. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructStats) - ), + }, ) _sym_db.RegisterMessage(StructStats) _sym_db.RegisterMessage(StructStats.FieldStatsEntry) @@ -1266,14 +1259,14 @@ CategoryStats = _reflection.GeneratedProtocolMessageType( "CategoryStats", (_message.Message,), - dict( - SingleCategoryStats=_reflection.GeneratedProtocolMessageType( + { + "SingleCategoryStats": _reflection.GeneratedProtocolMessageType( "SingleCategoryStats", (_message.Message,), - dict( - DESCRIPTOR=_CATEGORYSTATS_SINGLECATEGORYSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The statistics of a single CATEGORY value. + { + "DESCRIPTOR": _CATEGORYSTATS_SINGLECATEGORYSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The statistics of a single CATEGORY value. Attributes: @@ -1283,11 +1276,11 @@ The number of occurrences of this value in the series. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats) - ), + }, ), - DESCRIPTOR=_CATEGORYSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of CATEGORY values. + "DESCRIPTOR": _CATEGORYSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of CATEGORY values. Attributes: @@ -1297,7 +1290,7 @@ ats.count]. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CategoryStats) - ), + }, ) _sym_db.RegisterMessage(CategoryStats) _sym_db.RegisterMessage(CategoryStats.SingleCategoryStats) @@ -1305,20 +1298,20 @@ CorrelationStats = _reflection.GeneratedProtocolMessageType( "CorrelationStats", (_message.Message,), - dict( - DESCRIPTOR=_CORRELATIONSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""A correlation statistics between two series of DataType + { + "DESCRIPTOR": _CORRELATIONSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """A correlation statistics between two series of DataType values. The series may have differing DataType-s, but within a single series the DataType must be the same. Attributes: cramers_v: - The correlation value using the Cramer's V measure. + The correlation value using the Cramer’s V measure. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CorrelationStats) - ), + }, ) _sym_db.RegisterMessage(CorrelationStats) diff --git a/google/cloud/automl_v1beta1/proto/data_types.proto b/google/cloud/automl_v1beta1/proto/data_types.proto index 086e96e3..6f77f56b 100644 --- a/google/cloud/automl_v1beta1/proto/data_types.proto +++ b/google/cloud/automl_v1beta1/proto/data_types.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2.py b/google/cloud/automl_v1beta1/proto/data_types_pb2.py index 96121059..d87b6e8a 100644 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_types_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/data_types.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -23,13 +20,9 @@ name="google/cloud/automl_v1beta1/proto/data_types.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/data_types.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfc\x01\n\x08\x44\x61taType\x12\x42\n\x11list_element_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataTypeH\x00\x12>\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.StructTypeH\x00\x12\x15\n\x0btime_format\x18\x05 \x01(\tH\x00\x12\x38\n\ttype_code\x18\x01 \x01(\x0e\x32%.google.cloud.automl.v1beta1.TypeCode\x12\x10\n\x08nullable\x18\x04 \x01(\x08\x42\t\n\x07\x64\x65tails"\xa7\x01\n\nStructType\x12\x43\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x33.google.cloud.automl.v1beta1.StructType.FieldsEntry\x1aT\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 
\x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType:\x02\x38\x01*r\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0c\n\x08\x43\x41TEGORY\x10\nB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_types.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfc\x01\n\x08\x44\x61taType\x12\x42\n\x11list_element_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataTypeH\x00\x12>\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.StructTypeH\x00\x12\x15\n\x0btime_format\x18\x05 \x01(\tH\x00\x12\x38\n\ttype_code\x18\x01 \x01(\x0e\x32%.google.cloud.automl.v1beta1.TypeCode\x12\x10\n\x08nullable\x18\x04 \x01(\x08\x42\t\n\x07\x64\x65tails"\xa7\x01\n\nStructType\x12\x43\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x33.google.cloud.automl.v1beta1.StructType.FieldsEntry\x1aT\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 
\x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType:\x02\x38\x01*r\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0c\n\x08\x43\x41TEGORY\x10\nB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) _TYPECODE = _descriptor.EnumDescriptor( @@ -133,7 +126,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -193,7 +186,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=114, serialized_end=366, @@ -216,7 +209,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -247,7 +240,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -280,10 +273,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_STRUCTTYPE_FIELDSENTRY], + nested_types=[_STRUCTTYPE_FIELDSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -326,39 +319,36 @@ DataType = _reflection.GeneratedProtocolMessageType( "DataType", (_message.Message,), - dict( - DESCRIPTOR=_DATATYPE, - __module__="google.cloud.automl_v1beta1.proto.data_types_pb2", - __doc__="""Indicated the type of data that can be stored in a - structured data entity (e.g. a table). 
+ { + "DESCRIPTOR": _DATATYPE, + "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", + "__doc__": """Indicated the type of data that can be stored in a + structured data entity (e.g. a table). Attributes: details: Details of DataType-s that need additional specification. list_element_type: - If - [type\_code][google.cloud.automl.v1beta1.DataType.type\_code] + If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then ``list_element_type`` is the type of the elements. struct_type: - If - [type\_code][google.cloud.automl.v1beta1.DataType.type\_code] + If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], then - ``struct_type`` provides type information for the struct's + ``struct_type`` provides type information for the struct’s fields. time_format: - If - [type\_code][google.cloud.automl.v1beta1.DataType.type\_code] + If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] then ``time_format`` provides the format in which that time - field is expressed. The time\_format must either be one of: \* + field is expressed. The time_format must either be one of: \* ``UNIX_SECONDS`` \* ``UNIX_MILLISECONDS`` \* ``UNIX_MICROSECONDS`` \* ``UNIX_NANOSECONDS`` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in - ``strftime`` syntax. If time\_format is not set, then the - default format as described on the type\_code is used. + ``strftime`` syntax. If time_format is not set, then the + default format as described on the type_code is used. type_code: Required. The [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this type. @@ -367,26 +357,26 @@ ``NULL`` value is expressed as an empty string. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DataType) - ), + }, ) _sym_db.RegisterMessage(DataType) StructType = _reflection.GeneratedProtocolMessageType( "StructType", (_message.Message,), - dict( - FieldsEntry=_reflection.GeneratedProtocolMessageType( + { + "FieldsEntry": _reflection.GeneratedProtocolMessageType( "FieldsEntry", (_message.Message,), - dict( - DESCRIPTOR=_STRUCTTYPE_FIELDSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_types_pb2" + { + "DESCRIPTOR": _STRUCTTYPE_FIELDSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructType.FieldsEntry) - ), + }, ), - DESCRIPTOR=_STRUCTTYPE, - __module__="google.cloud.automl_v1beta1.proto.data_types_pb2", - __doc__="""\ ``StructType`` defines the DataType-s of a + "DESCRIPTOR": _STRUCTTYPE, + "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", + "__doc__": """\ ``StructType`` defines the DataType-s of a [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. @@ -397,7 +387,7 @@ data types are still mutable. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructType) - ), + }, ) _sym_db.RegisterMessage(StructType) _sym_db.RegisterMessage(StructType.FieldsEntry) diff --git a/google/cloud/automl_v1beta1/proto/dataset.proto b/google/cloud/automl_v1beta1/proto/dataset.proto index e07b1784..8d1b8d93 100644 --- a/google/cloud/automl_v1beta1/proto/dataset.proto +++ b/google/cloud/automl_v1beta1/proto/dataset.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/image.proto"; import "google/cloud/automl/v1beta1/tables.proto"; import "google/cloud/automl/v1beta1/text.proto"; @@ -34,6 +34,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // A workspace for solving a single, particular machine learning (ML) problem. // A workspace contains examples that may be annotated. message Dataset { + option (google.api.resource) = { + type: "automl.googleapis.com/Dataset" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}" + }; + // Required. // The dataset metadata that is specific to the problem type. oneof dataset_metadata { diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2.py b/google/cloud/automl_v1beta1/proto/dataset_pb2.py index fa8b2153..acc0a85c 100644 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1beta1/proto/dataset_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/dataset.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, ) @@ -38,13 +36,10 @@ name="google/cloud/automl_v1beta1/proto/dataset.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - "\n/google/cloud/automl_v1beta1/proto/dataset.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto\"\xee\x08\n\x07\x44\x61taset\x12_\n\x1ctranslation_dataset_metadata\x18\x17 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TranslationDatasetMetadataH\x00\x12p\n%image_classification_dataset_metadata\x18\x18 \x01(\x0b\x32?.google.cloud.automl.v1beta1.ImageClassificationDatasetMetadataH\x00\x12n\n$text_classification_dataset_metadata\x18\x19 \x01(\x0b\x32>.google.cloud.automl.v1beta1.TextClassificationDatasetMetadataH\x00\x12s\n'image_object_detection_dataset_metadata\x18\x1a \x01(\x0b\x32@.google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadataH\x00\x12p\n%video_classification_dataset_metadata\x18\x1f 
\x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoClassificationDatasetMetadataH\x00\x12q\n&video_object_tracking_dataset_metadata\x18\x1d \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadataH\x00\x12\x66\n text_extraction_dataset_metadata\x18\x1c \x01(\x0b\x32:.google.cloud.automl.v1beta1.TextExtractionDatasetMetadataH\x00\x12\x64\n\x1ftext_sentiment_dataset_metadata\x18\x1e \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TextSentimentDatasetMetadataH\x00\x12U\n\x17tables_dataset_metadata\x18! \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesDatasetMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x15\n\rexample_count\x18\x15 \x01(\x05\x12/\n\x0b\x63reate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04\x65tag\x18\x11 \x01(\tB\x12\n\x10\x64\x61taset_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3" - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b"\n/google/cloud/automl_v1beta1/proto/dataset.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto\"\xce\t\n\x07\x44\x61taset\x12_\n\x1ctranslation_dataset_metadata\x18\x17 
\x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TranslationDatasetMetadataH\x00\x12p\n%image_classification_dataset_metadata\x18\x18 \x01(\x0b\x32?.google.cloud.automl.v1beta1.ImageClassificationDatasetMetadataH\x00\x12n\n$text_classification_dataset_metadata\x18\x19 \x01(\x0b\x32>.google.cloud.automl.v1beta1.TextClassificationDatasetMetadataH\x00\x12s\n'image_object_detection_dataset_metadata\x18\x1a \x01(\x0b\x32@.google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadataH\x00\x12p\n%video_classification_dataset_metadata\x18\x1f \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoClassificationDatasetMetadataH\x00\x12q\n&video_object_tracking_dataset_metadata\x18\x1d \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadataH\x00\x12\x66\n text_extraction_dataset_metadata\x18\x1c \x01(\x0b\x32:.google.cloud.automl.v1beta1.TextExtractionDatasetMetadataH\x00\x12\x64\n\x1ftext_sentiment_dataset_metadata\x18\x1e \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TextSentimentDatasetMetadataH\x00\x12U\n\x17tables_dataset_metadata\x18! 
\x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesDatasetMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x15\n\rexample_count\x18\x15 \x01(\x05\x12/\n\x0b\x63reate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04\x65tag\x18\x11 \x01(\t:^\xea\x41[\n\x1d\x61utoml.googleapis.com/Dataset\x12:projects/{project}/locations/{location}/datasets/{dataset}B\x12\n\x10\x64\x61taset_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3", dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2.DESCRIPTOR, @@ -234,7 +229,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -252,7 +247,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -270,7 +265,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -324,7 +319,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -337,7 +332,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + 
serialized_options=b"\352A[\n\035automl.googleapis.com/Dataset\022:projects/{project}/locations/{location}/datasets/{dataset}", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -348,10 +343,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=385, - serialized_end=1519, + serialized_start=412, + serialized_end=1642, ) _DATASET.fields_by_name[ @@ -462,10 +457,10 @@ Dataset = _reflection.GeneratedProtocolMessageType( "Dataset", (_message.Message,), - dict( - DESCRIPTOR=_DATASET, - __module__="google.cloud.automl_v1beta1.proto.dataset_pb2", - __doc__="""A workspace for solving a single, particular machine + { + "DESCRIPTOR": _DATASET, + "__module__": "google.cloud.automl_v1beta1.proto.dataset_pb2", + "__doc__": """A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples that may be annotated. @@ -498,8 +493,8 @@ display_name: Required. The name of the dataset to show in the interface. The name can be up to 32 characters long and can consist only - of ASCII Latin letters A-Z and a-z, underscores (\_), and - ASCII digits 0-9. + of ASCII Latin letters A-Z and a-z, underscores (_), and ASCII + digits 0-9. description: User-provided description of the dataset. The description can be up to 25000 characters long. @@ -509,13 +504,14 @@ Output only. Timestamp when this dataset was created. etag: Used to perform consistent read-modify-write updates. If not - set, a blind "overwrite" update happens. + set, a blind “overwrite” update happens. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Dataset) - ), + }, ) _sym_db.RegisterMessage(Dataset) DESCRIPTOR._options = None +_DATASET._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/detection.proto b/google/cloud/automl_v1beta1/proto/detection.proto index 99761fd5..c5864e20 100644 --- a/google/cloud/automl_v1beta1/proto/detection.proto +++ b/google/cloud/automl_v1beta1/proto/detection.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2.py b/google/cloud/automl_v1beta1/proto/detection_pb2.py index ab328c84..4ea89ff9 100644 --- a/google/cloud/automl_v1beta1/proto/detection_pb2.py +++ b/google/cloud/automl_v1beta1/proto/detection_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/detection.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -26,12 +23,8 @@ name="google/cloud/automl_v1beta1/proto/detection.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n1google/cloud/automl_v1beta1/proto/detection.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"p\n\x1eImageObjectDetectionAnnotation\x12?\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x02 \x01(\x02"\xb4\x01\n\x1dVideoObjectTrackingAnnotation\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12?\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x04 \x01(\x02"\xae\x02\n\x17\x42oundingBoxMetricsEntry\x12\x15\n\riou_threshold\x18\x01 \x01(\x02\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12o\n\x1a\x63onfidence_metrics_entries\x18\x03 \x03(\x0b\x32K.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02"\xd6\x01\n%ImageObjectDetectionEvaluationMetrics\x12$\n\x1c\x65valuated_bounding_box_count\x18\x01 
\x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x03 \x01(\x02"\xf4\x01\n$VideoObjectTrackingEvaluationMetrics\x12\x1d\n\x15\x65valuated_frame_count\x18\x01 \x01(\x05\x12$\n\x1c\x65valuated_bounding_box_count\x18\x02 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x04 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x06 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n1google/cloud/automl_v1beta1/proto/detection.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"p\n\x1eImageObjectDetectionAnnotation\x12?\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x02 \x01(\x02"\xb4\x01\n\x1dVideoObjectTrackingAnnotation\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12?\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x04 \x01(\x02"\xae\x02\n\x17\x42oundingBoxMetricsEntry\x12\x15\n\riou_threshold\x18\x01 \x01(\x02\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12o\n\x1a\x63onfidence_metrics_entries\x18\x03 
\x03(\x0b\x32K.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02"\xd6\x01\n%ImageObjectDetectionEvaluationMetrics\x12$\n\x1c\x65valuated_bounding_box_count\x18\x01 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x03 \x01(\x02"\xf4\x01\n$VideoObjectTrackingEvaluationMetrics\x12\x1d\n\x15\x65valuated_frame_count\x18\x01 \x01(\x05\x12$\n\x1c\x65valuated_bounding_box_count\x18\x02 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x04 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x06 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, @@ -113,7 +106,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -345,7 +338,7 @@ ), ], extensions=[], - nested_types=[_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY], + nested_types=[_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -567,10 +560,10 @@ ImageObjectDetectionAnnotation = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONANNOTATION, - 
__module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Annotation details for image object detection. + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Annotation details for image object detection. Attributes: @@ -582,57 +575,58 @@ positivity confidence. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionAnnotation) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionAnnotation) VideoObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Annotation details for video object tracking. + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Annotation details for video object tracking. Attributes: instance_id: Optional. The instance of the object, expressed as a positive - integer. Used to tell apart objects of the same type (i.e. - AnnotationSpec) when multiple are present on a single example. - NOTE: Instance ID prediction quality is not a part of model - evaluation and is done as best effort. Especially in cases - when an entity goes off-screen for a longer time (minutes), - when it comes back it may be given a new instance ID. + integer. Used to tell apart objects of the same type + (i.e. AnnotationSpec) when multiple are present on a single + example. NOTE: Instance ID prediction quality is not a part of + model evaluation and is done as best effort. Especially in + cases when an entity goes off-screen for a longer time + (minutes), when it comes back it may be given a new instance + ID. time_offset: Required. A time (frame) of a video to which this annotation - pertains. Represented as the duration since the video's start. + pertains. 
Represented as the duration since the video’s start. bounding_box: Required. The rectangle representing the object location on - the frame (i.e. at the time\_offset of the video). + the frame ( i.e. at the time_offset of the video). score: Output only. The confidence that this annotation is positive - for the video at the time\_offset, value in [0, 1], higher + for the video at the time_offset, value in [0, 1], higher means higher positivity confidence. For annotations created by the user the score is 1. When user approves an annotation, the original float score is kept (and not changed to 1). """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingAnnotation) BoundingBoxMetricsEntry = _reflection.GeneratedProtocolMessageType( "BoundingBoxMetricsEntry", (_message.Message,), - dict( - ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType( + { + "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( "ConfidenceMetricsEntry", (_message.Message,), - dict( - DESCRIPTOR=_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Metrics for a single confidence threshold. + { + "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Metrics for a single confidence threshold. Attributes: @@ -647,11 +641,11 @@ Output only. The harmonic mean of recall and precision. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry) - ), + }, ), - DESCRIPTOR=_BOUNDINGBOXMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Bounding box matching model metrics for a single + "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Bounding box matching model metrics for a single intersection-over-union threshold and multiple label match confidence thresholds. @@ -662,15 +656,14 @@ to compute this metrics entry. mean_average_precision: Output only. The mean average precision, most often close to - au\_prc. + au_prc. confidence_metrics_entries: - Output only. Metrics for each label-match - confidence\_threshold from - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve - is derived from them. + Output only. Metrics for each label-match confidence_threshold + from 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99. Precision-recall + curve is derived from them. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingBoxMetricsEntry) - ), + }, ) _sym_db.RegisterMessage(BoundingBoxMetricsEntry) _sym_db.RegisterMessage(BoundingBoxMetricsEntry.ConfidenceMetricsEntry) @@ -678,43 +671,42 @@ ImageObjectDetectionEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Model evaluation metrics for image object detection + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Model evaluation metrics for image object detection problems. Evaluates prediction quality of labeled bounding boxes. Attributes: evaluated_bounding_box_count: - Output only. 
The total number of bounding boxes (i.e. summed + Output only. The total number of bounding boxes (i.e. summed over all images) the ground truth used to create this evaluation had. bounding_box_metrics_entries: Output only. The bounding boxes match metrics for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label - confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - pair. + 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and each label confidence + threshold 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 pair. bounding_box_mean_average_precision: Output only. The single metric for bounding boxes evaluation: - the mean\_average\_precision averaged over all - bounding\_box\_metrics\_entries. + the mean_average_precision averaged over all + bounding_box_metrics_entries. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionEvaluationMetrics) VideoObjectTrackingEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Model evaluation metrics for video object tracking + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Model evaluation metrics for video object tracking problems. Evaluates prediction quality of both labeled bounding boxes - and labeled tracks (i.e. series of bounding boxes sharing same label and + and labeled tracks (i.e. series of bounding boxes sharing same label and instance ID). @@ -723,22 +715,21 @@ Output only. The number of video frames used to create this evaluation. evaluated_bounding_box_count: - Output only. The total number of bounding boxes (i.e. summed + Output only. The total number of bounding boxes (i.e. 
summed over all frames) the ground truth used to create this evaluation had. bounding_box_metrics_entries: Output only. The bounding boxes match metrics for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label - confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - pair. + 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and each label confidence + threshold 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 pair. bounding_box_mean_average_precision: Output only. The single metric for bounding boxes evaluation: - the mean\_average\_precision averaged over all - bounding\_box\_metrics\_entries. + the mean_average_precision averaged over all + bounding_box_metrics_entries. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingEvaluationMetrics) diff --git a/google/cloud/automl_v1beta1/proto/geometry.proto b/google/cloud/automl_v1beta1/proto/geometry.proto index e5379ab1..d5654aac 100644 --- a/google/cloud/automl_v1beta1/proto/geometry.proto +++ b/google/cloud/automl_v1beta1/proto/geometry.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2.py b/google/cloud/automl_v1beta1/proto/geometry_pb2.py index 324d76f5..8027aa04 100644 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1beta1/proto/geometry_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/geometry.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,9 @@ name="google/cloud/automl_v1beta1/proto/geometry.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n0google/cloud/automl_v1beta1/proto/geometry.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"Z\n\x0c\x42oundingPoly\x12J\n\x13normalized_vertices\x18\x02 \x03(\x0b\x32-.google.cloud.automl.v1beta1.NormalizedVertexB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/geometry.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"Z\n\x0c\x42oundingPoly\x12J\n\x13normalized_vertices\x18\x02 
\x03(\x0b\x32-.google.cloud.automl.v1beta1.NormalizedVertexB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -113,7 +106,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -135,10 +128,10 @@ NormalizedVertex = _reflection.GeneratedProtocolMessageType( "NormalizedVertex", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDVERTEX, - __module__="google.cloud.automl_v1beta1.proto.geometry_pb2", - __doc__="""Required. Horizontal coordinate. + { + "DESCRIPTOR": _NORMALIZEDVERTEX, + "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", + "__doc__": """Required. Horizontal coordinate. Attributes: @@ -146,18 +139,18 @@ Required. Vertical coordinate. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.NormalizedVertex) - ), + }, ) _sym_db.RegisterMessage(NormalizedVertex) BoundingPoly = _reflection.GeneratedProtocolMessageType( "BoundingPoly", (_message.Message,), - dict( - DESCRIPTOR=_BOUNDINGPOLY, - __module__="google.cloud.automl_v1beta1.proto.geometry_pb2", - __doc__="""A bounding polygon of a detected object on a plane. On - output both vertices and normalized\_vertices are provided. The polygon + { + "DESCRIPTOR": _BOUNDINGPOLY, + "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", + "__doc__": """A bounding polygon of a detected object on a plane. On + output both vertices and normalized_vertices are provided. The polygon is formed by connecting vertices in the order they are listed. @@ -166,7 +159,7 @@ Output only . The bounding polygon normalized vertices. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingPoly) - ), + }, ) _sym_db.RegisterMessage(BoundingPoly) diff --git a/google/cloud/automl_v1beta1/proto/image.proto b/google/cloud/automl_v1beta1/proto/image.proto index 5995efc6..960eaeb0 100644 --- a/google/cloud/automl_v1beta1/proto/image.proto +++ b/google/cloud/automl_v1beta1/proto/image.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,16 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/annotation_spec.proto"; import "google/cloud/automl/v1beta1/classification.proto"; import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -36,7 +36,9 @@ message ImageClassificationDatasetMetadata { } // Dataset metadata specific to image object detection. -message ImageObjectDetectionDatasetMetadata {} +message ImageObjectDetectionDatasetMetadata { + +} // Model metadata for image classification. message ImageClassificationModelMetadata { @@ -65,38 +67,34 @@ message ImageClassificationModelMetadata { // This is the default value. // * `mobile-low-latency-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile or edge device with TensorFlow - // afterwards. 
Expected to have low latency, but may have lower - // prediction quality than other models. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have low latency, but + // may have lower prediction quality than other models. // * `mobile-versatile-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile or edge device with TensorFlow - // afterwards. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. // * `mobile-high-accuracy-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile or edge device with TensorFlow - // afterwards. Expected to have a higher latency, but should - // also have a higher prediction quality than other models. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have a higher + // latency, but should also have a higher prediction quality + // than other models. // * `mobile-core-ml-low-latency-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile device with Core ML afterwards. Expected - // to have low latency, but may have lower prediction quality - // than other models. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core + // ML afterwards. Expected to have low latency, but may have + // lower prediction quality than other models. 
// * `mobile-core-ml-versatile-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile device with Core ML afterwards. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core + // ML afterwards. // * `mobile-core-ml-high-accuracy-1` - A model that, in addition to // providing prediction via AutoML API, can also be exported - // (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile device with Core ML afterwards. Expected - // to have a higher latency, but should also have a higher - // prediction quality than other models. + // (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with + // Core ML afterwards. Expected to have a higher latency, but + // should also have a higher prediction quality than other + // models. string model_type = 7; // Output only. An approximate number of online prediction QPS that can @@ -119,6 +117,21 @@ message ImageObjectDetectionModelMetadata { // * `cloud-low-latency-1` - A model to be used via prediction // calls to AutoML API. Expected to have low latency, but may // have lower prediction quality than other models. + // * `mobile-low-latency-1` - A model that, in addition to providing + // prediction via AutoML API, can also be exported (see + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have low latency, but + // may have lower prediction quality than other models. 
+ // * `mobile-versatile-1` - A model that, in addition to providing + // prediction via AutoML API, can also be exported (see + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. + // * `mobile-high-accuracy-1` - A model that, in addition to providing + // prediction via AutoML API, can also be exported (see + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have a higher + // latency, but should also have a higher prediction quality + // than other models. string model_type = 1; // Output only. The number of nodes this model is deployed on. A node is an diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py index 3a0a54a4..62672732 100644 --- a/google/cloud/automl_v1beta1/proto/image_pb2.py +++ b/google/cloud/automl_v1beta1/proto/image_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/image.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,7 +12,7 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( annotation_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2, ) @@ -23,23 +20,21 @@ classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/image.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 
\x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e \x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e 
\x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -68,7 +63,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -78,8 +73,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=254, - serialized_end=368, + serialized_start=281, + serialized_end=395, ) @@ -98,8 +93,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=370, - serialized_end=407, + serialized_start=397, + serialized_end=434, ) @@ -119,7 +114,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -173,7 +168,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -191,7 +186,7 @@ 
cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -245,8 +240,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=410, - serialized_end=588, + serialized_start=437, + serialized_end=615, ) @@ -266,7 +261,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -320,7 +315,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -374,8 +369,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=591, - serialized_end=781, + serialized_start=618, + serialized_end=808, ) @@ -403,7 +398,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -413,8 +408,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=783, - serialized_end=847, + serialized_start=810, + serialized_end=874, ) @@ -442,7 +437,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -452,8 +447,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=849, - serialized_end=914, + serialized_start=876, + serialized_end=941, ) _IMAGECLASSIFICATIONDATASETMETADATA.fields_by_name[ @@ -484,10 +479,10 @@ ImageClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "ImageClassificationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGECLASSIFICATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Dataset metadata that is specific to image classification. 
+ { + "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Dataset metadata that is specific to image classification. Attributes: @@ -495,31 +490,31 @@ Required. Type of the classification problem. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageClassificationDatasetMetadata) ImageObjectDetectionDatasetMetadata = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Dataset metadata specific to image object detection. + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Dataset metadata specific to image object detection. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionDatasetMetadata) ImageClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( "ImageClassificationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGECLASSIFICATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model metadata for image classification. + { + "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model metadata for image classification. Attributes: @@ -584,20 +579,20 @@ node_count: Output only. The number of nodes this model is deployed on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the node\_qps field. + online prediction QPS as given in the node_qps field. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageClassificationModelMetadata) ImageObjectDetectionModelMetadata = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model metadata specific to image object detection. + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model metadata specific to image object detection. Attributes: @@ -609,11 +604,27 @@ other models. \* ``cloud-low-latency-1`` - A model to be used via prediction calls to AutoML API. Expected to have low latency, but may have lower prediction quality than other + models. \* ``mobile-low-latency-1`` - A model that, in + addition to providing prediction via AutoML API, can also be + exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1 + .AutoMl.ExportModel]) and used on a mobile or edge device with + TensorFlow afterwards. Expected to have low latency, but may + have lower prediction quality than other models. \* ``mobile- + versatile-1`` - A model that, in addition to providing + prediction via AutoML API, can also be exported (see [AutoMl.E + xportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. \* ``mobile-high-accuracy-1`` - A model that, in + addition to providing prediction via AutoML API, can also be + exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1 + .AutoMl.ExportModel]) and used on a mobile or edge device with + TensorFlow afterwards. Expected to have a higher latency, but + should also have a higher prediction quality than other models. node_count: Output only. The number of nodes this model is deployed on. 
A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the qps\_per\_node field. + online prediction QPS as given in the qps_per_node field. node_qps: Output only. An approximate number of online prediction QPS that can be supported by this model per each node on which it @@ -623,12 +634,12 @@ stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. train_budget_milli_node_hours: The train budget of creating this model, expressed in milli - node hours i.e. 1,000 value in this field means 1 node hour. + node hours i.e. 1,000 value in this field means 1 node hour. The actual ``train_cost`` will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using full budget and the - stop\_reason will be ``MODEL_CONVERGED``. Note, node\_hour = - actual\_hour \* number\_of\_nodes\_invovled. For model type + stop_reason will be ``MODEL_CONVERGED``. Note, node_hour = + actual_hour \* number_of_nodes_invovled. For model type ``cloud-high-accuracy-1``\ (default) and ``cloud-low- latency-1``, the train budget must be between 20,000 and 900,000 milli node hours, inclusive. The default value is 216, @@ -641,21 +652,21 @@ represents one day in wall time. train_cost_milli_node_hours: Output only. The actual train cost of creating this model, - expressed in milli node hours, i.e. 1,000 value in this field + expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionModelMetadata) ImageClassificationModelDeploymentMetadata = _reflection.GeneratedProtocolMessageType( "ImageClassificationModelDeploymentMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model deployment metadata specific to Image + { + "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model deployment metadata specific to Image Classification. @@ -663,22 +674,22 @@ node_count: Input only. The number of nodes to deploy the model on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the model's [node\_qps][goo - gle.cloud.automl.v1beta1.ImageClassificationModelMetadata.node - \_qps]. Must be between 1 and 100, inclusive on both ends. + online prediction QPS as given in the model’s [node_qps][goog + le.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_ + qps]. Must be between 1 and 100, inclusive on both ends. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageClassificationModelDeploymentMetadata) ImageObjectDetectionModelDeploymentMetadata = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionModelDeploymentMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model deployment metadata specific to Image Object + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model deployment metadata specific to Image Object Detection. @@ -686,13 +697,13 @@ node_count: Input only. The number of nodes to deploy the model on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the model's [qps\_per\_node - ][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadat - a.qps\_per\_node]. Must be between 1 and 100, inclusive on - both ends. + online prediction QPS as given in the model’s [qps_per_node][ + google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata. + qps_per_node]. Must be between 1 and 100, inclusive on both + ends. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionModelDeploymentMetadata) diff --git a/google/cloud/automl_v1beta1/proto/io.proto b/google/cloud/automl_v1beta1/proto/io.proto index 5cc61c5e..a9979383 100644 --- a/google/cloud/automl_v1beta1/proto/io.proto +++ b/google/cloud/automl_v1beta1/proto/io.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py index c2fb6138..44ea8ca2 100644 --- a/google/cloud/automl_v1beta1/proto/io_pb2.py +++ b/google/cloud/automl_v1beta1/proto/io_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/io.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,9 @@ name="google/cloud/automl_v1beta1/proto/io.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n*google/cloud/automl_v1beta1/proto/io.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x92\x02\n\x0bInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x12\x44\n\x06params\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.InputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06source"\xa9\x01\n\x17\x42\x61tchPredictInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x02 
\x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x42\x08\n\x06source"Q\n\x13\x44ocumentInputConfig\x12:\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSource"\xb7\x01\n\x0cOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xc3\x01\n\x18\x42\x61tchPredictOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xcf\x02\n\x17ModelExportOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12\x46\n\x0fgcr_destination\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcrDestinationH\x00\x12\x14\n\x0cmodel_format\x18\x04 \x01(\t\x12P\n\x06params\x18\x02 \x03(\x0b\x32@.google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stination"\x86\x01\n#ExportEvaluatedExamplesOutputConfig\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\x1f\n\tGcsSource\x12\x12\n\ninput_uris\x18\x01 \x03(\t"#\n\x0e\x42igQuerySource\x12\x11\n\tinput_uri\x18\x01 \x01(\t"+\n\x0eGcsDestination\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t")\n\x13\x42igQueryDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\t"$\n\x0eGcrDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - 
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n*google/cloud/automl_v1beta1/proto/io.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x92\x02\n\x0bInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x12\x44\n\x06params\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.InputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06source"\xa9\x01\n\x17\x42\x61tchPredictInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x42\x08\n\x06source"Q\n\x13\x44ocumentInputConfig\x12:\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSource"\xb7\x01\n\x0cOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xc3\x01\n\x18\x42\x61tchPredictOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xcf\x02\n\x17ModelExportOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12\x46\n\x0fgcr_destination\x18\x03 
\x01(\x0b\x32+.google.cloud.automl.v1beta1.GcrDestinationH\x00\x12\x14\n\x0cmodel_format\x18\x04 \x01(\t\x12P\n\x06params\x18\x02 \x03(\x0b\x32@.google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stination"\x86\x01\n#ExportEvaluatedExamplesOutputConfig\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\x1f\n\tGcsSource\x12\x12\n\ninput_uris\x18\x01 \x03(\t"#\n\x0e\x42igQuerySource\x12\x11\n\tinput_uri\x18\x01 \x01(\t"+\n\x0eGcsDestination\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t")\n\x13\x42igQueryDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\t"$\n\x0eGcrDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -48,7 +41,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -66,7 +59,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -79,7 +72,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -151,7 +144,7 @@ ), ], extensions=[], - nested_types=[_INPUTCONFIG_PARAMSENTRY], + nested_types=[_INPUTCONFIG_PARAMSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -164,7 +157,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=106, 
serialized_end=380, @@ -229,7 +222,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=383, serialized_end=552, @@ -260,7 +253,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -333,7 +326,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=638, serialized_end=821, @@ -398,7 +391,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=824, serialized_end=1019, @@ -421,7 +414,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -439,7 +432,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -452,7 +445,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -513,7 +506,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -542,7 +535,7 @@ ), ], extensions=[], - nested_types=[_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY], + nested_types=[_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -555,7 +548,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=1022, serialized_end=1357, @@ -586,7 +579,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -602,7 +595,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=1360, serialized_end=1494, @@ -633,7 +626,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ 
-664,7 +657,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -672,7 +665,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -703,7 +696,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -711,7 +704,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -742,7 +735,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -750,7 +743,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -781,7 +774,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -789,7 +782,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -918,40 +911,40 @@ InputConfig = _reflection.GeneratedProtocolMessageType( "InputConfig", (_message.Message,), - dict( - ParamsEntry=_reflection.GeneratedProtocolMessageType( + { + "ParamsEntry": _reflection.GeneratedProtocolMessageType( "ParamsEntry", (_message.Message,), - dict( - DESCRIPTOR=_INPUTCONFIG_PARAMSENTRY, - __module__="google.cloud.automl_v1beta1.proto.io_pb2" + { + "DESCRIPTOR": _INPUTCONFIG_PARAMSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig.ParamsEntry) - ), + }, ), - DESCRIPTOR=_INPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - 
__doc__="""Input configuration for ImportData Action. + "DESCRIPTOR": _INPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Input configuration for ImportData Action. - The format of input depends on dataset\_metadata the Dataset into which + The format of input depends on dataset_metadata the Dataset into which the import is happening has. As input source the - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] is + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is expected, unless specified otherwise. Additionally any input .CSV file by itself must be 100MB or smaller, unless specified otherwise. If an - "example" file (that is, image, video etc.) with identical content (even - if it had different GCS\_FILE\_PATH) is mentioned multiple times, then - its label, bounding boxes etc. are appended. The same file should be - always provided with the same ML\_USE and GCS\_FILE\_PATH, if it is not, - then these values are nondeterministically selected from the given ones. + “example” file (that is, image, video etc.) with identical content (even + if it had different GCS_FILE_PATH) is mentioned multiple times, then its + label, bounding boxes etc. are appended. The same file should be always + provided with the same ML_USE and GCS_FILE_PATH, if it is not, then + these values are nondeterministically selected from the given ones. The formats are represented in EBNF with commas being literal and with non-terminal symbols defined near the end of this comment. The formats are: - For Image Classification: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... GCS\_FILE\_PATH leads to - image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, - .WEBP, .BMP, .TIFF, .ICO For MULTICLASS classification type, at most - one LABEL is allowed per image. 
If an image has not yet been labeled, + ML_USE,GCS_FILE_PATH,LABEL,LABEL,… GCS_FILE_PATH leads to image of up + to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, + .BMP, .TIFF, .ICO For MULTICLASS classification type, at most one + LABEL is allowed per image. If an image has not yet been labeled, then it should be mentioned just once with no LABEL. Some sample rows: TRAIN,gs://folder/image1.jpg,daisy TEST,gs://folder/image2.jpg,dandelion,tulip,rose @@ -959,66 +952,63 @@ UNASSIGNED,gs://folder/image4.jpg - For Image Object Detection: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH,(LABEL,BOUNDING\_BOX \| ,,,,,,,) - GCS\_FILE\_PATH leads to image of up to 30MB in size. Supported - extensions: .JPEG, .GIF, .PNG. Each image is assumed to be - exhaustively labeled. The minimum allowed BOUNDING\_BOX edge length - is 0.01, and no more than 500 BOUNDING\_BOX-es per image are allowed - (one BOUNDING\_BOX is defined per line). If an image has not yet been - labeled, then it should be mentioned just once with no LABEL and the - ",,,,,,," in place of the BOUNDING\_BOX. For images which are known - to not contain any bounding boxes, they should be labelled explictly - as "NEGATIVE\_IMAGE", followed by ",,,,,,," in place of the - BOUNDING\_BOX. Sample rows: - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) GCS_FILE_PATH + leads to image of up to 30MB in size. Supported extensions: .JPEG, + .GIF, .PNG. Each image is assumed to be exhaustively labeled. The + minimum allowed BOUNDING_BOX edge length is 0.01, and no more than + 500 BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is + defined per line). If an image has not yet been labeled, then it + should be mentioned just once with no LABEL and the “,,,,,,,” in + place of the BOUNDING_BOX. 
For images which are known to not contain + any bounding boxes, they should be labelled explictly as + “NEGATIVE_IMAGE”, followed by “,,,,,,,” in place of the BOUNDING_BOX. + Sample rows: TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 TEST,gs://folder/im3.png,,,,,,,,, - TRAIN,gs://folder/im4.png,NEGATIVE\_IMAGE,,,,,,,,, + TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - For Video Classification: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH where ML\_USE VALIDATE value should not be - used. The GCS\_FILE\_PATH should lead to another .csv file which - describes examples that have given ML\_USE, using the following row - format: - GCS\_FILE\_PATH,(LABEL,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END \| ,,) - Here GCS\_FILE\_PATH leads to a video of up to 50GB in size and up to - 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME\_SEGMENT\_START and TIME\_SEGMENT\_END must be within the length - of the video, and end has to be after the start. Any segment of a - video which has one or more labels on it, is considered a hard - negative for all other labels. Any segment with no labels on it is - considered to be unknown. If a whole video is unknown, then it shuold - be mentioned just once with ",," in place of LABEL, - TIME\_SEGMENT\_START,TIME\_SEGMENT\_END. Sample top level CSV file: - TRAIN,gs://folder/train\_videos.csv TEST,gs://folder/test\_videos.csv - UNASSIGNED,gs://folder/other\_videos.csv Sample rows of a CSV file - for a particular ML\_USE: gs://folder/video1.avi,car,120,180.000021 + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. 
+ The GCS_FILE_PATH should lead to another .csv file which describes + examples that have given ML_USE, using the following row format: + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) Here + GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h + duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length of + the video, and end has to be after the start. Any segment of a video + which has one or more labels on it, is considered a hard negative for + all other labels. Any segment with no labels on it is considered to + be unknown. If a whole video is unknown, then it shuold be mentioned + just once with “,,” in place of LABEL, + TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file: + TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for + a particular ML_USE: gs://folder/video1.avi,car,120,180.000021 gs://folder/video1.avi,bike,150,180.000021 gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - For Video Object Tracking: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH where ML\_USE VALIDATE value should not be - used. The GCS\_FILE\_PATH should lead to another .csv file which - describes examples that have given ML\_USE, using one of the - following row format: - GCS\_FILE\_PATH,LABEL,[INSTANCE\_ID],TIMESTAMP,BOUNDING\_BOX or - GCS\_FILE\_PATH,,,,,,,,,, Here GCS\_FILE\_PATH leads to a video of up - to 50GB in size and up to 3h duration. Supported extensions: .MOV, - .MPEG4, .MP4, .AVI. Providing INSTANCE\_IDs can help to obtain a + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. 
+ The GCS_FILE_PATH should lead to another .csv file which describes + examples that have given ML_USE, using one of the following row + format: GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or + GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up to + 50GB in size and up to 3h duration. Supported extensions: .MOV, + .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to obtain a better model. When a specific labeled entity leaves the video frame, and shows up afterwards it is not required, albeit preferable, that - the same INSTANCE\_ID is given to it. TIMESTAMP must be within the - length of the video, the BOUNDING\_BOX is assumed to be drawn on the - closest video's frame to the TIMESTAMP. Any mentioned by the + the same INSTANCE_ID is given to it. TIMESTAMP must be within the + length of the video, the BOUNDING_BOX is assumed to be drawn on the + closest video’s frame to the TIMESTAMP. Any mentioned by the TIMESTAMP frame is expected to be exhaustively labeled and no more - than 500 BOUNDING\_BOX-es per frame are allowed. If a whole video is - unknown, then it should be mentioned just once with ",,,,,,,,,," in - place of LABEL, [INSTANCE\_ID],TIMESTAMP,BOUNDING\_BOX. Sample top - level CSV file: TRAIN,gs://folder/train\_videos.csv - TEST,gs://folder/test\_videos.csv - UNASSIGNED,gs://folder/other\_videos.csv Seven sample rows of a CSV - file for a particular ML\_USE: + than 500 BOUNDING_BOX-es per frame are allowed. If a whole video is + unknown, then it should be mentioned just once with “,,,,,,,,,,” in + place of LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. 
Sample top + level CSV file: TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV + file for a particular ML_USE: gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 @@ -1027,129 +1017,168 @@ gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, gs://folder/video2.avi,,,,,,,,,,, - For Text Extraction: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .JSONL (that is, - JSON Lines) file which either imports text in-line or as documents. - Any given .JSONL file must be 100MB or smaller. The in-line .JSONL - file contains, per line, a proto that wraps a TextSnippet proto (in - json representation) followed by one or more AnnotationPayload protos - (called annotations), which have display\_name and text\_extraction + ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .JSONL (that is, JSON + Lines) file which either imports text in-line or as documents. Any + given .JSONL file must be 100MB or smaller. The in-line .JSONL file + contains, per line, a proto that wraps a TextSnippet proto (in json + representation) followed by one or more AnnotationPayload protos + (called annotations), which have display_name and text_extraction detail populated. The given text is expected to be annotated exhaustively, for example, if you look for animals and text contains - "dolphin" that is not labeled, then "dolphin" is assumed to not be an + “dolphin” that is not labeled, then “dolphin” is assumed to not be an animal. Any given text snippet content must be 10KB or smaller, and also be UTF-8 NFC encoded (ASCII already is). The document .JSONL file contains, per line, a proto that wraps a Document proto. The - Document proto must have either document\_text or input\_config set. 
- In document\_text case, the Document proto may also contain the - spatial information of the document, including layout, document - dimension and page number. In input\_config case, only PDF documents - are supported now, and each document may be up to 2MB large. - Currently, annotations on documents cannot be specified at import. - Three sample CSV rows: TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl + Document proto must have either document_text or input_config set. In + document_text case, the Document proto may also contain the spatial + information of the document, including layout, document dimension and + page number. In input_config case, only PDF documents are supported + now, and each document may be up to 2MB large. Currently, annotations + on documents cannot be specified at import. Three sample CSV rows: + TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl + TEST,gs://folder/file3.jsonl Sample in-line JSON Lines file for + entity extraction (presented here with artificial line breaks, but + the only actual line break is denoted by :raw-latex:`\n`).: { + “document”: { “document_text”: {“content”: “dog cat”} “layout”: [ { + “text_segment”: { “start_offset”: 0, “end_offset”: 3, }, + “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: + 0.1, “y”: 0.1}, {“x”: 0.1, “y”: 0.3}, {“x”: 0.3, “y”: 0.3}, {“x”: + 0.3, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }, { + “text_segment”: { “start_offset”: 4, “end_offset”: 7, }, + “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: + 0.4, “y”: 0.1}, {“x”: 0.4, “y”: 0.3}, {“x”: 0.8, “y”: 0.3}, {“x”: + 0.8, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }], + “document_dimensions”: { “width”: 8.27, “height”: 11.69, “unit”: + INCH, } “page_count”: 1, }, “annotations”: [ { “display_name”: + “animal”, “text_extraction”: {“text_segment”: {“start_offset”: 0, + “end_offset”: 3}} }, { “display_name”: “animal”, “text_extraction”: + 
{“text_segment”: {“start_offset”: 4, “end_offset”: 7}} } ], + }:raw-latex:`\n + { + "text_snippet": { + "content": "This dog is good." + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 8} + } + } + ] + }` Sample document JSON Lines file (presented here with + artificial line breaks, but the only actual line break is denoted by + :raw-latex:`\n`).: { “document”: { “input_config”: { “gcs_source”: { + “input_uris”: [ “gs://folder/document1.pdf” ] } } } }:raw-latex:`\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] + } + } + } + }` - For Text Classification: CSV file(s) with each line in format: - ML\_USE,(TEXT\_SNIPPET \| GCS\_FILE\_PATH),LABEL,LABEL,... - TEXT\_SNIPPET and GCS\_FILE\_PATH are distinguished by a pattern. If - the column content is a valid gcs file path, i.e. prefixed by - "gs://", it will be treated as a GCS\_FILE\_PATH, else if the content - is enclosed within double quotes (""), it is treated as a - TEXT\_SNIPPET. In the GCS\_FILE\_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for example, - "gs://folder/content.txt", and the content in it is extracted as a - text snippet. In TEXT\_SNIPPET case, the column content excluding - quotes is treated as to be imported text snippet. In both cases, the - text snippet/file size must be within 128kB. Maximum 100 unique - labels are allowed per CSV row. Sample rows: TRAIN,"They have bad - food and very rude",RudeService,BadFood - TRAIN,gs://folder/content.txt,SlowService TEST,"Typically always bad - service there.",RudeService VALIDATE,"Stomach ache to go.",BadFood + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,… TEXT_SNIPPET and + GCS_FILE_PATH are distinguished by a pattern. If the column content + is a valid gcs file path, i.e. 
prefixed by “gs://”, it will be + treated as a GCS_FILE_PATH, else if the content is enclosed within + double quotes ("“), it is treated as a TEXT_SNIPPET. In the + GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 + encoding, for example,”gs://folder/content.txt“, and the content in + it is extracted as a text snippet. In TEXT_SNIPPET case, the column + content excluding quotes is treated as to be imported text snippet. + In both cases, the text snippet/file size must be within 128kB. + Maximum 100 unique labels are allowed per CSV row. Sample rows: + TRAIN,”They have bad food and very rude“,RudeService,BadFood + TRAIN,gs://folder/content.txt,SlowService TEST,”Typically always bad + service there.“,RudeService VALIDATE,”Stomach ache to go.",BadFood - For Text Sentiment: CSV file(s) with each line in format: - ML\_USE,(TEXT\_SNIPPET \| GCS\_FILE\_PATH),SENTIMENT TEXT\_SNIPPET - and GCS\_FILE\_PATH are distinguished by a pattern. If the column - content is a valid gcs file path, that is, prefixed by "gs://", it is - treated as a GCS\_FILE\_PATH, otherwise it is treated as a - TEXT\_SNIPPET. In the GCS\_FILE\_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for example, - "gs://folder/content.txt", and the content in it is extracted as a - text snippet. In TEXT\_SNIPPET case, the column content itself is - treated as to be imported text snippet. In both cases, the text - snippet must be up to 500 characters long. Sample rows: - TRAIN,"@freewrytin this is way too good for your product",2 TRAIN,"I - need this product so bad",3 TEST,"Thank you for this product.",4 - VALIDATE,gs://folder/content.txt,2 + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and + GCS_FILE_PATH are distinguished by a pattern. If the column content + is a valid gcs file path, that is, prefixed by “gs://”, it is treated + as a GCS_FILE_PATH, otherwise it is treated as a TEXT_SNIPPET. 
In the + GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 + encoding, for example, “gs://folder/content.txt”, and the content in + it is extracted as a text snippet. In TEXT_SNIPPET case, the column + content itself is treated as to be imported text snippet. In both + cases, the text snippet must be up to 500 characters long. Sample + rows: TRAIN,“@freewrytin this is way too good for your product”,2 + TRAIN,“I need this product so bad”,3 TEST,“Thank you for this + product.”,4 VALIDATE,gs://folder/content.txt,2 - For Tables: Either - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] or + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or - [bigquery\_source][google.cloud.automl.v1beta1.InputConfig.bigquery\_source] + [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] can be used. All inputs is concatenated into a single - [primary\_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary\_table\_name] - For gcs\_source: CSV file(s), where the first row of the first file is + [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] + For gcs_source: CSV file(s), where the first row of the first file is the header, containing unique column names. If the first row of a subsequent file is the same as the header, then it is also treated as a header. All other rows contain values for the corresponding columns. Each .CSV file by itself must be 10GB or smaller, and their total size must be 100GB or smaller. 
First three sample rows of a CSV file: - "Id","First Name","Last Name","Dob","Addresses" + “Id”,“First Name”,“Last Name”,“Dob”,“Addresses” - "1","John","Doe","1968-01-22","[{"status":"current","address":"123\_First\_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456\_Main\_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" + “1”,“John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]” - "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789\_Any\_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321\_Main\_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} - For bigquery\_source: An URI of a BigQuery table. The user data size of + “2”,“Jane”,“Doe”,“1980-10-16”,“[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} + For bigquery_source: An URI of a BigQuery table. The user data size of the BigQuery table must be 100GB or smaller. An imported table must have between 2 and 1,000 columns, inclusive, and between 1000 and 100,000,000 rows, inclusive. There are at most 5 import data running in parallel. - Definitions: ML\_USE = "TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED" + Definitions: ML_USE =”TRAIN" \| “VALIDATE” \| “TEST” \| “UNASSIGNED” Describes how the given example (file) should be used for model - training. "UNASSIGNED" can be used when user has no preference. - GCS\_FILE\_PATH = A path to file on GCS, e.g. "gs://folder/image1.png". - LABEL = A display name of an object on an image, video etc., e.g. "dog". + training. 
“UNASSIGNED” can be used when user has no preference. + GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/image1.png”. + LABEL = A display name of an object on an image, video etc., e.g. “dog”. Must be up to 32 characters long and can consist only of ASCII Latin - letters A-Z and a-z, underscores(\_), and ASCII digits 0-9. For each - label an AnnotationSpec is created which display\_name becomes the - label; AnnotationSpecs are given back in predictions. INSTANCE\_ID = A - positive integer that identifies a specific instance of a labeled entity - on an example. Used e.g. to track two cars on a video while being able - to tell apart which one is which. BOUNDING\_BOX = - VERTEX,VERTEX,VERTEX,VERTEX \| VERTEX,,,VERTEX,, A rectangle parallel to - the frame of the example (image, video). If 4 vertices are given they - are connected by edges in the order provided, if 2 are given they are - recognized as diagonally opposite vertices of the rectangle. VERTEX = - COORDINATE,COORDINATE First coordinate is horizontal (x), the second is - vertical (y). COORDINATE = A float in 0 to 1 range, relative to total - length of image or video in given dimension. For fractions the leading - non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top left. - TIME\_SEGMENT\_START = TIME\_OFFSET Expresses a beginning, inclusive, of - a time segment within an example that has a time dimension (e.g. video). - TIME\_SEGMENT\_END = TIME\_OFFSET Expresses an end, exclusive, of a time - segment within an example that has a time dimension (e.g. video). - TIME\_OFFSET = A number of seconds as measured from the start of an - example (e.g. video). Fractions are allowed, up to a microsecond - precision. "inf" is allowed, and it means the end of the example. - TEXT\_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed - within double quotes (""). SENTIMENT = An integer between 0 and - Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max (inclusive). 
+ letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For each + label an AnnotationSpec is created which display_name becomes the label; + AnnotationSpecs are given back in predictions. INSTANCE_ID = A positive + integer that identifies a specific instance of a labeled entity on an + example. Used e.g. to track two cars on a video while being able to tell + apart which one is which. BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX \| + VERTEX,,,VERTEX,, A rectangle parallel to the frame of the example + (image, video). If 4 vertices are given they are connected by edges in + the order provided, if 2 are given they are recognized as diagonally + opposite vertices of the rectangle. VERTEX = COORDINATE,COORDINATE First + coordinate is horizontal (x), the second is vertical (y). COORDINATE = A + float in 0 to 1 range, relative to total length of image or video in + given dimension. For fractions the leading non-decimal 0 can be omitted + (i.e. 0.3 = .3). Point 0,0 is in top left. TIME_SEGMENT_START = + TIME_OFFSET Expresses a beginning, inclusive, of a time segment within + an example that has a time dimension (e.g. video). TIME_SEGMENT_END = + TIME_OFFSET Expresses an end, exclusive, of a time segment within an + example that has a time dimension (e.g. video). TIME_OFFSET = A number + of seconds as measured from the start of an example (e.g. video). + Fractions are allowed, up to a microsecond precision. “inf” is allowed, + and it means the end of the example. TEXT_SNIPPET = A content of a text + snippet, UTF-8 encoded, enclosed within double quotes ("“). SENTIMENT = + An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). Describes the ordinal of the sentiment - higher value means a more - positive sentiment. All the values are completely relative, i.e. neither - 0 needs to mean a negative or neutral sentiment nor sentiment\_max needs + positive sentiment. All the values are completely relative, i.e. 
neither + 0 needs to mean a negative or neutral sentiment nor sentiment_max needs to mean a positive one - it is just required that 0 is the least - positive sentiment in the data, and sentiment\_max is the most positive - one. The SENTIMENT shouldn't be confused with "score" or "magnitude" - from the previous Natural Language Sentiment Analysis API. All SENTIMENT - values between 0 and sentiment\_max must be represented in the imported - data. On prediction the same 0 to sentiment\_max range will be used. The + positive sentiment in the data, and sentiment_max is the most positive + one. The SENTIMENT shouldn’t be confused with”score" or “magnitude” from + the previous Natural Language Sentiment Analysis API. All SENTIMENT + values between 0 and sentiment_max must be represented in the imported + data. On prediction the same 0 to sentiment_max range will be used. The difference between neighboring sentiment values needs not to be uniform, - e.g. 1 and 2 may be similar whereas the difference between 2 and 3 may + e.g. 1 and 2 may be similar whereas the difference between 2 and 3 may be huge. - Errors: If any of the provided CSV files can't be parsed or if more than + Errors: If any of the provided CSV files can’t be parsed or if more than certain percent of CSV rows cannot be processed then the operation fails and nothing is imported. Regardless of overall success or failure the per-row failures, up to a certain count cap, is listed in - Operation.metadata.partial\_failures. + Operation.metadata.partial_failures. Attributes: @@ -1157,7 +1186,7 @@ The source of the input. gcs_source: The Google Cloud Storage location for the input content. In - ImportData, the gcs\_source points to a csv with structure + ImportData, the gcs_source points to a csv with structure described in the comment. bigquery_source: The BigQuery location for the input content. @@ -1167,11 +1196,11 @@ characters long. - For Tables: ``schema_inference_version`` - (integer) Required. 
The version of the algorithm that should be used for the initial inference of the schema - (columns' DataTypes) of the table the data is being - imported into. Allowed values: "1". + (columns’ DataTypes) of the table the data is being + imported into. Allowed values: “1”. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig) - ), + }, ) _sym_db.RegisterMessage(InputConfig) _sym_db.RegisterMessage(InputConfig.ParamsEntry) @@ -1179,14 +1208,14 @@ BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTINPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Input configuration for BatchPredict Action. + { + "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Input configuration for BatchPredict Action. The format of input depends on the ML problem of the model used for prediction. As input source the - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] is + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is expected, unless specified otherwise. The formats are represented in EBNF with commas being literal and with @@ -1194,124 +1223,124 @@ are: - For Image Classification: CSV file(s) with each line having just a - single column: GCS\_FILE\_PATH which leads to image of up to 30MB in + single column: GCS_FILE_PATH which leads to image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the Batch predict output. Three sample rows: gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - For Image Object Detection: CSV file(s) with each line having just a - single column: GCS\_FILE\_PATH which leads to image of up to 30MB in + single column: GCS_FILE_PATH which leads to image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. 
This path is treated as the ID in the Batch predict output. Three sample rows: gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - For Video Classification: CSV file(s) with each line in format: - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END - GCS\_FILE\_PATH leads to video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME\_SEGMENT\_START and TIME\_SEGMENT\_END must be within the length - of the video, and end has to be after the start. Three sample rows: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads + to video of up to 50GB in size and up to 3h duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and + TIME_SEGMENT_END must be within the length of the video, and end has + to be after the start. Three sample rows: gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf - For Video Object Tracking: CSV file(s) with each line in format: - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END - GCS\_FILE\_PATH leads to video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME\_SEGMENT\_START and TIME\_SEGMENT\_END must be within the length - of the video, and end has to be after the start. Three sample rows: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads + to video of up to 50GB in size and up to 3h duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and + TIME_SEGMENT_END must be within the length of the video, and end has + to be after the start. Three sample rows: gs://folder/video1.mp4,10,240 gs://folder/video1.mp4,300,360 gs://folder/vid2.mov,0,inf - For Text Classification: CSV file(s) with each line having just a - single column: GCS\_FILE\_PATH \| TEXT\_SNIPPET Any given text file - can have size upto 128kB. Any given text snippet content must have - 60,000 characters or less. 
Three sample rows: gs://folder/text1.txt - "Some text content to predict" gs://folder/text3.pdf Supported file + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file can + have size upto 128kB. Any given text snippet content must have 60,000 + characters or less. Three sample rows: gs://folder/text1.txt “Some + text content to predict” gs://folder/text3.pdf Supported file extensions: .txt, .pdf - For Text Sentiment: CSV file(s) with each line having just a single - column: GCS\_FILE\_PATH \| TEXT\_SNIPPET Any given text file can have + column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file can have size upto 128kB. Any given text snippet content must have 500 - characters or less. Three sample rows: gs://folder/text1.txt "Some - text content to predict" gs://folder/text3.pdf Supported file + characters or less. Three sample rows: gs://folder/text1.txt “Some + text content to predict” gs://folder/text3.pdf Supported file extensions: .txt, .pdf - - For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either + - For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either provide text in-line or as documents (for a single BatchPredict call only one of the these formats may be used). The in-line .JSONL file(s) contain per line a proto that wraps a temporary user-assigned - TextSnippet ID (string up to 2000 characters long) called "id", a + TextSnippet ID (string up to 2000 characters long) called “id”, a TextSnippet proto (in json representation) and zero or more TextFeature protos. Any given text snippet content must have 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII already is). The IDs provided should be unique. The document .JSONL file(s) contain, per line, a proto that wraps a Document proto with - input\_config set. Only PDF documents are supported now, and each + input_config set. Only PDF documents are supported now, and each document must be up to 2MB large. 
Any given .JSONL file must be 100MB or smaller, and no more than 20 files may be given. - For Tables: Either - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] or + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or - [bigquery\_source][google.cloud.automl.v1beta1.InputConfig.bigquery\_source]. + [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. GCS case: CSV file(s), each by itself 10GB or smaller and total size must be 100GB or smaller, where first file must have a header containing column names. If the first row of a subsequent file is the same as the header, then it is also treated as a header. All other rows contain values for the corresponding columns. The column names must contain the - model's + model’s - [input\_feature\_column\_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input\_feature\_column\_specs] + [input_feature_column_specs’][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] - (order doesn't matter). The columns corresponding to the model's input + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input feature column specs must contain values compatible with the column - spec's data types. Prediction on all the rows, i.e. the CSV lines, will + spec’s data types. Prediction on all the rows, i.e. the CSV lines, will be attempted. For FORECASTING - [prediction\_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: all columns having - [TIME\_SERIES\_AVAILABLE\_PAST\_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. 
First three sample rows of a CSV file: "First - Name","Last Name","Dob","Addresses" + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] + type will be ignored. First three sample rows of a CSV file: “First + Name”,“Last Name”,“Dob”,“Addresses” - "John","Doe","1968-01-22","[{"status":"current","address":"123\_First\_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456\_Main\_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" + “John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]” - "Jane","Doe","1980-10-16","[{"status":"current","address":"789\_Any\_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321\_Main\_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} + “Jane”,“Doe”,“1980-10-16”,"[{“status”:“current”,“address”:“789_Any_Avenue”,“city”:“Albany”,“state”:“NY”,“zip”:“33333”,“numberOfYears”:“2”},{“status”:“previous”,“address”:“321_Main_Street”,“city”:“Hoboken”,“state”:“NJ”,“zip”:“44444”,“numberOfYears”:“3”}]} BigQuery case: An URI of a BigQuery table. The user data size of the BigQuery table must be 100GB or smaller. The column names must contain - the model's + the model’s - [input\_feature\_column\_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input\_feature\_column\_specs] + [input_feature_column_specs’][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] - (order doesn't matter). The columns corresponding to the model's input + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn’t matter). 
The columns corresponding to the model’s input feature column specs must contain values compatible with the column - spec's data types. Prediction on all the rows of the table will be + spec’s data types. Prediction on all the rows of the table will be attempted. For FORECASTING - [prediction\_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: all columns having - [TIME\_SERIES\_AVAILABLE\_PAST\_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] type will be ignored. - Definitions: GCS\_FILE\_PATH = A path to file on GCS, e.g. - "gs://folder/video.avi". TEXT\_SNIPPET = A content of a text snippet, - UTF-8 encoded, enclosed within double quotes ("") TIME\_SEGMENT\_START = - TIME\_OFFSET Expresses a beginning, inclusive, of a time segment within - an example that has a time dimension (e.g. video). TIME\_SEGMENT\_END = - TIME\_OFFSET Expresses an end, exclusive, of a time segment within an - example that has a time dimension (e.g. video). TIME\_OFFSET = A number - of seconds as measured from the start of an example (e.g. video). - Fractions are allowed, up to a microsecond precision. "inf" is allowed + Definitions: GCS_FILE_PATH = A path to file on GCS, e.g. + “gs://folder/video.avi”. TEXT_SNIPPET = A content of a text snippet, + UTF-8 encoded, enclosed within double quotes ("“) TIME_SEGMENT_START = + TIME_OFFSET Expresses a beginning, inclusive, of a time segment within + an example that has a time dimension (e.g. video). TIME_SEGMENT_END = + TIME_OFFSET Expresses an end, exclusive, of a time segment within an + example that has a time dimension (e.g. video). TIME_OFFSET = A number + of seconds as measured from the start of an example (e.g. video). 
+ Fractions are allowed, up to a microsecond precision.”inf" is allowed and it means the end of the example. - Errors: If any of the provided CSV files can't be parsed or if more than + Errors: If any of the provided CSV files can’t be parsed or if more than certain percent of CSV rows cannot be processed then the operation fails and prediction does not happen. Regardless of overall success or failure the per-row failures, up to a certain count cap, will be listed in - Operation.metadata.partial\_failures. + Operation.metadata.partial_failures. Attributes: @@ -1323,17 +1352,17 @@ The BigQuery location for the input content. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictInputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictInputConfig) DocumentInputConfig = _reflection.GeneratedProtocolMessageType( "DocumentInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTINPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Input configuration of a + { + "DESCRIPTOR": _DOCUMENTINPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Input configuration of a [Document][google.cloud.automl.v1beta1.Document]. @@ -1344,41 +1373,41 @@ Supported extensions: .PDF. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DocumentInputConfig) - ), + }, ) _sym_db.RegisterMessage(DocumentInputConfig) OutputConfig = _reflection.GeneratedProtocolMessageType( "OutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_OUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""\* For Translation: CSV file ``translation.csv``, with - each line in format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a - .TSV file which describes examples that have given ML\_USE, using the - following row format per line: TEXT\_SNIPPET (in source language) - \\tTEXT\_SNIPPET (in target language) + { + "DESCRIPTOR": _OUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """\* For Translation: CSV file ``translation.csv``, with + each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV + file which describes examples that have given ML_USE, using the + following row format per line: TEXT_SNIPPET (in source language) + \\tTEXT_SNIPPET (in target language) - For Tables: Output depends on whether the dataset was imported from GCS or BigQuery. GCS case: - [gcs\_destination][google.cloud.automl.v1beta1.OutputConfig.gcs\_destination] + [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] must be set. Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as header line - the table's column names, and all other lines contain values for the + ``tables_2.csv``,…,\ ``tables_N.csv`` with each having as header line + the table’s column names, and all other lines contain values for the header columns. BigQuery case: - [bigquery\_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery\_destination] + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery project must be set. 
In the given project a new dataset will be created with name ``export_data__`` - where will be made BigQuery-dataset-name compatible (e.g. most special + where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores), and timestamp will be in - YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In that - dataset a new table called ``primary_table`` will be created, and filled - with precisely the same data as this obtained on import. + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In that dataset a + new table called ``primary_table`` will be created, and filled with + precisely the same data as this obtained on import. Attributes: @@ -1388,49 +1417,49 @@ The Google Cloud Storage location where the output is to be written to. For Image Object Detection, Text Extraction, Video Classification and Tables, in the given directory a new - directory will be created with name: export\_data-- where + directory will be created with name: export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. bigquery_destination: The BigQuery location where the output is to be written to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.OutputConfig) - ), + }, ) _sym_db.RegisterMessage(OutputConfig) BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictOutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTOUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Output configuration for BatchPredict Action. + { + "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Output configuration for BatchPredict Action. As destination the - [gcs\_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs\_destination] - must be set unless specified otherwise for a domain. 
If gcs\_destination + [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] + must be set unless specified otherwise for a domain. If gcs_destination is set then in the given directory a new directory is created. Its name - will be "prediction--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ + will be “prediction--”, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the ML problem the predictions are made for. - For Image Classification: In the created directory files ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` + ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` will be created, where N may be 1, and depends on the total number of the successfully predicted images and annotations. A single image will be listed only once with all its annotations, and its annotations will never be split across files. Each .JSONL file will contain, per line, a JSON representation of a proto that wraps - image's "ID" : "" followed by a list of zero or more + image’s “ID” : “” followed by a list of zero or more AnnotationPayload protos (called annotations), which have classification detail populated. If prediction for any image failed (partially or completely), then an additional ``errors_1.jsonl``, - ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created (N + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N depends on total number of failed predictions). 
These files will have - a JSON representation of a proto that wraps the same "ID" : "" but + a JSON representation of a proto that wraps the same “ID” : “” but here followed by exactly one ```google.rpc.Status`` `__ @@ -1438,157 +1467,156 @@ - For Image Object Detection: In the created directory files ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` + ``image_object_detection_2.jsonl``,…,\ ``image_object_detection_N.jsonl`` will be created, where N may be 1, and depends on the total number of the successfully predicted images and annotations. Each .JSONL file will contain, per line, a JSON representation of a proto that wraps - image's "ID" : "" followed by a list of zero or more + image’s “ID” : “” followed by a list of zero or more AnnotationPayload protos (called annotations), which have - image\_object\_detection detail populated. A single image will be + image_object_detection detail populated. A single image will be listed only once with all its annotations, and its annotations will never be split across files. If prediction for any image failed (partially or completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created (N + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same "ID" : "" but + a JSON representation of a proto that wraps the same “ID” : “” but here followed by exactly one ```google.rpc.Status`` `__ containing only ``code`` and ``message``\ fields. \* For Video - Classification: In the created directory a video\_classification.csv + Classification: In the created directory a video_classification.csv file, and a .JSON file per each video classification requested in the - input (i.e. each line in given CSV(s)), will be created. + input (i.e. each line in given CSV(s)), will be created. 
:: - The format of video_classification.csv is: + The format of video_classification.csv is: - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END,JSON\_FILE\_NAME,STATUS - where: GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END = matches - 1 to 1 the prediction input lines (i.e. video\_classification.csv has + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to + 1 the prediction input lines (i.e. video_classification.csv has precisely the same number of lines as the prediction input had.) - JSON\_FILE\_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = "OK" + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = “OK” if prediction completed successfully, or an error code with message - otherwise. If STATUS is not "OK" then the .JSON file for that line may + otherwise. If STATUS is not “OK” then the .JSON file for that line may not exist or be empty. :: - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for the video time segment the file is assigned to in the - video_classification.csv. All AnnotationPayload protos will have - video_classification field set, and will be sorted by - video_classification.type field (note that the returned types are - governed by `classifaction_types` parameter in - [PredictService.BatchPredictRequest.params][]). + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for the video time segment the file is assigned to in the + video_classification.csv. 
All AnnotationPayload protos will have + video_classification field set, and will be sorted by + video_classification.type field (note that the returned types are + governed by `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). - For Video Object Tracking: In the created directory a - video\_object\_tracking.csv file will be created, and multiple files - video\_object\_trackinng\_1.json, - video\_object\_trackinng\_2.json,..., - video\_object\_trackinng\_N.json, where N is the number of requests - in the input (i.e. the number of lines in given CSV(s)). + video_object_tracking.csv file will be created, and multiple files + video_object_trackinng_1.json, video_object_trackinng_2.json,…, + video_object_trackinng_N.json, where N is the number of requests in + the input (i.e. the number of lines in given CSV(s)). :: - The format of video_object_tracking.csv is: + The format of video_object_tracking.csv is: - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END,JSON\_FILE\_NAME,STATUS - where: GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END = matches - 1 to 1 the prediction input lines (i.e. video\_object\_tracking.csv has + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to + 1 the prediction input lines (i.e. video_object_tracking.csv has precisely the same number of lines as the prediction input had.) - JSON\_FILE\_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = "OK" + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = “OK” if prediction completed successfully, or an error code with message - otherwise. If STATUS is not "OK" then the .JSON file for that line may + otherwise. If STATUS is not “OK” then the .JSON file for that line may not exist or be empty. 
:: - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for each frame of the video time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for each frame of the video time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. - For Text Classification: In the created directory files ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` + ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` will be created, where N may be 1, and depends on the total number of inputs and annotations found. :: - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text snippet or input text file and a list of + zero or more AnnotationPayload protos (called annotations), which + have classification detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). 
These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one + If prediction for any text snippet or file failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one ```google.rpc.Status`` `__ containing only ``code`` and ``message``. - For Text Sentiment: In the created directory files ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will be + ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be created, where N may be 1, and depends on the total number of inputs and annotations found. :: - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text snippet or input text file and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_sentiment detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). 
These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one + If prediction for any text snippet or file failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one ```google.rpc.Status`` `__ containing only ``code`` and ``message``. - For Text Extraction: In the created directory files ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` will be + ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be created, where N may be 1, and depends on the total number of inputs and annotations found. The contents of these .JSONL file(s) depend on whether the input used inline text, or documents. If input was inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet's - "id" (if specified), followed by input text snippet, and a list of + representation of a proto that wraps given in request text snippet’s + “id” (if specified), followed by input text snippet, and a list of zero or more AnnotationPayload protos (called annotations), which - have text\_extraction detail populated. A single text snippet will be + have text_extraction detail populated. A single text snippet will be listed only once with all its annotations, and its annotations will never be split across files. 
If input used documents, then each .JSONL file will contain, per line, a JSON representation of a proto that wraps given in request document proto, followed by its OCR-ed representation in the form of a text snippet, finally followed by a list of zero or more AnnotationPayload protos (called annotations), - which have text\_extraction detail populated and refer, via their + which have text_extraction detail populated and refer, via their indices, to the OCR-ed text snippet. A single document (and its text snippet) will be listed only once with all its annotations, and its annotations will never be split across files. If prediction for any text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` files + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N depends on total number of failed predictions). These files will have a JSON representation of a proto that wraps - either the "id" : "" (in case of inline) or the document proto (in + either the “id” : “” (in case of inline) or the document proto (in case of document) but here followed by exactly one ```google.rpc.Status`` `__ @@ -1596,44 +1624,44 @@ - For Tables: Output depends on whether - [gcs\_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs\_destination] + [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] or - [bigquery\_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery\_destination] + [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] is set (either is allowed). GCS case: In the created directory files - ``tables_1.csv``, ``tables_2.csv``,..., ``tables_N.csv`` will be - created, where N may be 1, and depends on the total number of the - successfully predicted rows. 
For all CLASSIFICATION + ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` will be created, + where N may be 1, and depends on the total number of the successfully + predicted rows. For all CLASSIFICATION - [prediction\_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: - Each .csv file will contain a header, listing all columns' + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns’ - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] given on input followed by M target column names in the format of - "<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] + "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>\_\_score" - where M is the number of distinct target values, i.e. number of distinct + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>\_\_score" + where M is the number of distinct target values, i.e. number of distinct values in the target column of the table used to train the model. Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, columns having the + predicted rows, with the last, i.e. the target, columns having the corresponding prediction [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. 
For REGRESSION and FORECASTING - [prediction\_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: - Each .csv file will contain a header, listing all columns' - [display\_name-s][google.cloud.automl.v1beta1.display\_name] given on + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns’ + [display_name-s][google.cloud.automl.v1beta1.display_name] given on input followed by the predicted target column with name in the format of - "predicted\_<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] + "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>" + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, column having the + predicted rows, with the last, i.e. the target, column having the predicted target value. If prediction for any rows failed, then an - additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv`` will + additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will be created (N depends on total number of failed rows). These files will have analogous format as ``tables_*.csv``, but always with a single target column having @@ -1642,22 +1670,22 @@ represented as a JSON string, and containing only ``code`` and ``message``. BigQuery case: - [bigquery\_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery\_destination] + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery project must be set. In the given project a new dataset will be created with name ``prediction__`` where - will be made BigQuery-dataset-name compatible (e.g. 
most special + will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores), and timestamp will be in - YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In the dataset - two tables will be created, ``predictions``, and ``errors``. The - ``predictions`` table's column names will be the input columns' + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset two + tables will be created, ``predictions``, and ``errors``. The + ``predictions`` table’s column names will be the input columns’ - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] followed by the target column with name in the format of - "predicted\_<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] + "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>" + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" The input feature columns will contain the respective values of successfully predicted rows, with the target column having an ARRAY of @@ -1668,9 +1696,9 @@ has analogous input columns while the target column name is in the format of - "errors\_<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] + "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>", + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", and as a value has ```google.rpc.Status`` `__ @@ -1687,26 +1715,26 @@ The BigQuery location where the output is to be written to. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOutputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictOutputConfig) ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType( "ModelExportOutputConfig", (_message.Message,), - dict( - ParamsEntry=_reflection.GeneratedProtocolMessageType( + { + "ParamsEntry": _reflection.GeneratedProtocolMessageType( "ParamsEntry", (_message.Message,), - dict( - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, - __module__="google.cloud.automl_v1beta1.proto.io_pb2" + { + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry) - ), + }, ), - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Output configuration for ModelExport Action. + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Output configuration for ModelExport Action. Attributes: @@ -1715,49 +1743,49 @@ gcs_destination: The Google Cloud Storage location where the model is to be written to. This location may only be set for the following - model formats: "tflite", "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js", "core\_ml". Under the directory - given as the destination a new one with name "model-export--", - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format, will be created. Inside the model and any of its - supporting files will be written. + model formats: “tflite”, “edgetpu_tflite”, “tf_saved_model”, + “tf_js”, “core_ml”. Under the directory given as the + destination a new one with name “model-export--”, where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will + be created. Inside the model and any of its supporting files + will be written. gcr_destination: The GCR location where model image is to be pushed to. 
This location may only be set for the following model formats: - "docker". The model image will be created under the given + “docker”. The model image will be created under the given URI. model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type (if - given problem and type combination doesn't have a format + given problem and type combination doesn’t have a format listed, it means its models are not exportable): - For Image Classification mobile-low-latency-1, mobile-versatile-1, - mobile-high-accuracy-1: "tflite" (default), "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js", "docker". - For Image + mobile-high-accuracy-1: “tflite” (default), “edgetpu_tflite”, + “tf_saved_model”, “tf_js”, “docker”. - For Image Classification mobile-core-ml-low-latency-1, mobile-core- - ml-versatile-1, mobile-core-ml-high-accuracy-1: "core\_ml" + ml-versatile-1, mobile-core-ml-high-accuracy-1: “core_ml” (default). Formats description: - tflite - Used for Android - mobile devices. - edgetpu\_tflite - Used for `Edge TPU + mobile devices. - edgetpu_tflite - Used for `Edge TPU `__ devices. - - tf\_saved\_model - A tensorflow model in SavedModel format. - - tf\_js - A `TensorFlow.js `__ + tf_saved_model - A tensorflow model in SavedModel format. - + tf_js - A `TensorFlow.js `__ model that can be used in the browser and in Node.js using JavaScript. - docker - Used for Docker containers. Use the params field to customize the container. The container is verified to work correctly on ubuntu 16.04 operating system. See more at [containers quickstart](https: //cloud.google.com/vision/automl/docs/containers-gcs- - quickstart) \* core\_ml - Used for iOS mobile devices. + quickstart) \* core_ml - Used for iOS mobile devices. params: Additional model-type and format specific parameters describing the requirements for the to be exported model files, any string must be up to 25000 characters long. 
- For - ``docker`` format: ``cpu_architecture`` - (string) "x86\_64" - (default). ``gpu_architecture`` - (string) "none" (default), - "nvidia". + ``docker`` format: ``cpu_architecture`` - (string) “x86_64” + (default). ``gpu_architecture`` - (string) “none” (default), + “nvidia”. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelExportOutputConfig) - ), + }, ) _sym_db.RegisterMessage(ModelExportOutputConfig) _sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry) @@ -1765,33 +1793,33 @@ ExportEvaluatedExamplesOutputConfig = _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesOutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Output configuration for ExportEvaluatedExamples Action. + { + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Output configuration for ExportEvaluatedExamples Action. Note that this call is available only for 30 days since the moment the model was evaluated. The output depends on the domain, as follows (note that only examples from the TEST set are exported): - For Tables: - [bigquery\_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery\_destination] + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery project must be set. In the given project a new dataset will be created with name ``export_evaluated_examples__`` - where will be made BigQuery-dataset-name compatible (e.g. most special + where will be made BigQuery-dataset-name compatible (e.g. most special characters will become underscores), and timestamp will be in - YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In the dataset - an ``evaluated_examples`` table will be created. It will have all the - same columns as the + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. 
In the dataset an + ``evaluated_examples`` table will be created. It will have all the same + columns as the - [primary\_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary\_table\_spec\_id] - of the [dataset][google.cloud.automl.v1beta1.Model.dataset\_id] from - which the model was created, as they were at the moment of model's + [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id] + of the [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from + which the model was created, as they were at the moment of model’s evaluation (this includes the target column with its ground truth), - followed by a column called "predicted\_". That last column will contain - the model's prediction result for each respective row, given as ARRAY of + followed by a column called “predicted\_”. That last column will contain + the model’s prediction result for each respective row, given as ARRAY of [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. @@ -1804,57 +1832,57 @@ The BigQuery location where the output is to be written to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig) - ), + }, ) _sym_db.RegisterMessage(ExportEvaluatedExamplesOutputConfig) GcsSource = _reflection.GeneratedProtocolMessageType( "GcsSource", (_message.Message,), - dict( - DESCRIPTOR=_GCSSOURCE, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The Google Cloud Storage location for the input content. + { + "DESCRIPTOR": _GCSSOURCE, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location for the input content. Attributes: input_uris: Required. Google Cloud Storage URIs to input files, up to 2000 - characters long. Accepted forms: \* Full object path, e.g. - gs://bucket/directory/object.csv + characters long. 
Accepted forms: \* Full object path, + e.g. gs://bucket/directory/object.csv """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcsSource) - ), + }, ) _sym_db.RegisterMessage(GcsSource) BigQuerySource = _reflection.GeneratedProtocolMessageType( "BigQuerySource", (_message.Message,), - dict( - DESCRIPTOR=_BIGQUERYSOURCE, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The BigQuery location for the input content. + { + "DESCRIPTOR": _BIGQUERYSOURCE, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The BigQuery location for the input content. Attributes: input_uri: Required. BigQuery URI to a table, up to 2000 characters long. - Accepted forms: \* BigQuery path e.g. - bq://projectId.bqDatasetId.bqTableId + Accepted forms: \* BigQuery path + e.g. bq://projectId.bqDatasetId.bqTableId """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BigQuerySource) - ), + }, ) _sym_db.RegisterMessage(BigQuerySource) GcsDestination = _reflection.GeneratedProtocolMessageType( "GcsDestination", (_message.Message,), - dict( - DESCRIPTOR=_GCSDESTINATION, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The Google Cloud Storage location where the output is to + { + "DESCRIPTOR": _GCSDESTINATION, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location where the output is to be written to. @@ -1864,39 +1892,39 @@ 2000 characters long. Accepted forms: \* Prefix path: gs://bucket/directory The requesting user must have write permission to the bucket. The directory is created if it - doesn't exist. + doesn’t exist. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcsDestination) - ), + }, ) _sym_db.RegisterMessage(GcsDestination) BigQueryDestination = _reflection.GeneratedProtocolMessageType( "BigQueryDestination", (_message.Message,), - dict( - DESCRIPTOR=_BIGQUERYDESTINATION, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The BigQuery location for the output content. + { + "DESCRIPTOR": _BIGQUERYDESTINATION, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The BigQuery location for the output content. Attributes: output_uri: Required. BigQuery URI to a project, up to 2000 characters - long. Accepted forms: \* BigQuery path e.g. bq://projectId + long. Accepted forms: \* BigQuery path e.g. bq://projectId """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BigQueryDestination) - ), + }, ) _sym_db.RegisterMessage(BigQueryDestination) GcrDestination = _reflection.GeneratedProtocolMessageType( "GcrDestination", (_message.Message,), - dict( - DESCRIPTOR=_GCRDESTINATION, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The GCR location where the image must be pushed to. + { + "DESCRIPTOR": _GCRDESTINATION, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The GCR location where the image must be pushed to. Attributes: @@ -1904,13 +1932,13 @@ Required. Google Contained Registry URI of the new image, up to 2000 characters long. See https: //cloud.google.com/container-registry/do // cs/pushing-and- - pulling#pushing\_an\_image\_to\_a\_registry Accepted forms: \* + pulling#pushing_an_image_to_a_registry Accepted forms: \* [HOSTNAME]/[PROJECT-ID]/[IMAGE] \* [HOSTNAME]/[PROJECT- ID]/[IMAGE]:[TAG] The requesting user must have permission to push images the project. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcrDestination) - ), + }, ) _sym_db.RegisterMessage(GcrDestination) diff --git a/google/cloud/automl_v1beta1/proto/model.proto b/google/cloud/automl_v1beta1/proto/model.proto index 8c53d9b5..2b2e8d73 100644 --- a/google/cloud/automl_v1beta1/proto/model.proto +++ b/google/cloud/automl_v1beta1/proto/model.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/image.proto"; import "google/cloud/automl/v1beta1/tables.proto"; import "google/cloud/automl/v1beta1/text.proto"; @@ -33,6 +33,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // API proto representing a trained machine learning model. message Model { + option (google.api.resource) = { + type: "automl.googleapis.com/Model" + pattern: "projects/{project}/locations/{location}/models/{model}" + }; + // Deployment state of the model. enum DeploymentState { // Should not be used, an un-set enum has this value by default. diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation.proto b/google/cloud/automl_v1beta1/proto/model_evaluation.proto index ce2db614..d5633fcd 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation.proto +++ b/google/cloud/automl_v1beta1/proto/model_evaluation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/classification.proto"; import "google/cloud/automl/v1beta1/detection.proto"; import "google/cloud/automl/v1beta1/regression.proto"; @@ -35,6 +35,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Evaluation results of a model. message ModelEvaluation { + option (google.api.resource) = { + type: "automl.googleapis.com/ModelEvaluation" + pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}" + }; + // Output only. Problem type specific evaluation metrics. oneof metrics { // Model evaluation metrics for image, text, video and tables diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py index 681a2663..73e1f8e3 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/model_evaluation.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) @@ -44,13 +42,10 @@ name="google/cloud/automl_v1beta1/proto/model_evaluation.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n8google/cloud/automl_v1beta1/proto/model_evaluation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xa7\x07\n\x0fModelEvaluation\x12i\n!classification_evaluation_metrics\x18\x08 \x01(\x0b\x32<.google.cloud.automl.v1beta1.ClassificationEvaluationMetricsH\x00\x12\x61\n\x1dregression_evaluation_metrics\x18\x18 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.RegressionEvaluationMetricsH\x00\x12\x63\n\x1etranslation_evaluation_metrics\x18\t 
\x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TranslationEvaluationMetricsH\x00\x12w\n)image_object_detection_evaluation_metrics\x18\x0c \x01(\x0b\x32\x42.google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetricsH\x00\x12u\n(video_object_tracking_evaluation_metrics\x18\x0e \x01(\x0b\x32\x41.google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetricsH\x00\x12h\n!text_sentiment_evaluation_metrics\x18\x0b \x01(\x0b\x32;.google.cloud.automl.v1beta1.TextSentimentEvaluationMetricsH\x00\x12j\n"text_extraction_evaluation_metrics\x18\r \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextExtractionEvaluationMetricsH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\x12\x61nnotation_spec_id\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0f \x01(\t\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x17\x65valuated_example_count\x18\x06 \x01(\x05\x42\t\n\x07metricsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n8google/cloud/automl_v1beta1/proto/model_evaluation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb1\x08\n\x0fModelEvaluation\x12i\n!classification_evaluation_metrics\x18\x08 \x01(\x0b\x32<.google.cloud.automl.v1beta1.ClassificationEvaluationMetricsH\x00\x12\x61\n\x1dregression_evaluation_metrics\x18\x18 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.RegressionEvaluationMetricsH\x00\x12\x63\n\x1etranslation_evaluation_metrics\x18\t \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TranslationEvaluationMetricsH\x00\x12w\n)image_object_detection_evaluation_metrics\x18\x0c \x01(\x0b\x32\x42.google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetricsH\x00\x12u\n(video_object_tracking_evaluation_metrics\x18\x0e \x01(\x0b\x32\x41.google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetricsH\x00\x12h\n!text_sentiment_evaluation_metrics\x18\x0b \x01(\x0b\x32;.google.cloud.automl.v1beta1.TextSentimentEvaluationMetricsH\x00\x12j\n"text_extraction_evaluation_metrics\x18\r \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextExtractionEvaluationMetricsH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\x12\x61nnotation_spec_id\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0f \x01(\t\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x17\x65valuated_example_count\x18\x06 
\x01(\x05:\x87\x01\xea\x41\x83\x01\n%automl.googleapis.com/ModelEvaluation\x12Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}B\t\n\x07metricsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2.DESCRIPTOR, @@ -206,7 +201,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -224,7 +219,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -242,7 +237,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -291,7 +286,7 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + serialized_options=b"\352A\203\001\n%automl.googleapis.com/ModelEvaluation\022Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -302,10 +297,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=526, - serialized_end=1461, + serialized_start=553, + serialized_end=1626, ) _MODELEVALUATION.fields_by_name[ @@ -394,10 +389,10 @@ ModelEvaluation = _reflection.GeneratedProtocolMessageType( "ModelEvaluation", (_message.Message,), - dict( - DESCRIPTOR=_MODELEVALUATION, 
- __module__="google.cloud.automl_v1beta1.proto.model_evaluation_pb2", - __doc__="""Evaluation results of a model. + { + "DESCRIPTOR": _MODELEVALUATION, + "__module__": "google.cloud.automl_v1beta1.proto.model_evaluation_pb2", + "__doc__": """Evaluation results of a model. Attributes: @@ -430,40 +425,41 @@ evaluation applies to. The The ID is empty for the overall model evaluation. For Tables annotation specs in the dataset do not exist and this ID is always not set, but for - CLASSIFICATION [prediction\_type-s][google.cloud.automl.v1bet - a1.TablesModelMetadata.prediction\_type] the [display\_name][g - oogle.cloud.automl.v1beta1.ModelEvaluation.display\_name] - field is used. + CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta + 1.TablesModelMetadata.prediction_type] the [display_name][goog + le.cloud.automl.v1beta1.ModelEvaluation.display_name] field is + used. display_name: - Output only. The value of [display\_name][google.cloud.automl. - v1beta1.AnnotationSpec.display\_name] at the moment when the + Output only. The value of [display_name][google.cloud.automl.v + 1beta1.AnnotationSpec.display_name] at the moment when the model was trained. Because this field returns a value at model training time, for different models trained from the same dataset, the values may differ, since display names could had - been changed between the two model's trainings. For Tables - CLASSIFICATION [prediction\_type-s][google.cloud.automl.v1bet - a1.TablesModelMetadata.prediction\_type] distinct values of - the target column at the moment of the model evaluation are - populated here. The display\_name is empty for the overall + been changed between the two model’s trainings. For Tables + CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta + 1.TablesModelMetadata.prediction_type] distinct values of the + target column at the moment of the model evaluation are + populated here. The display_name is empty for the overall model evaluation. create_time: Output only. 
Timestamp when this model evaluation was created. evaluated_example_count: Output only. The number of examples used for model evaluation, - i.e. for which ground truth from time of model creation is + i.e. for which ground truth from time of model creation is compared against the predicted annotations created by the - model. For overall ModelEvaluation (i.e. with - annotation\_spec\_id not set) this is the total number of all + model. For overall ModelEvaluation (i.e. with + annotation_spec_id not set) this is the total number of all examples used for evaluation. Otherwise, this is the count of examples that according to the ground truth were annotated by - the [annotation\_spec\_id][google.cloud.automl.v1beta1.ModelE - valuation.annotation\_spec\_id]. + the [annotation_spec_id][google.cloud.automl.v1beta1.ModelEva + luation.annotation_spec_id]. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelEvaluation) - ), + }, ) _sym_db.RegisterMessage(ModelEvaluation) DESCRIPTOR._options = None +_MODELEVALUATION._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/model_pb2.py b/google/cloud/automl_v1beta1/proto/model_pb2.py index f669af20..284eeb3e 100644 --- a/google/cloud/automl_v1beta1/proto/model_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/model.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, ) @@ -38,13 +36,10 @@ name="google/cloud/automl_v1beta1/proto/model.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n-google/cloud/automl_v1beta1/proto/model.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xf2\t\n\x05Model\x12[\n\x1atranslation_model_metadata\x18\x0f \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TranslationModelMetadataH\x00\x12l\n#image_classification_model_metadata\x18\r \x01(\x0b\x32=.google.cloud.automl.v1beta1.ImageClassificationModelMetadataH\x00\x12j\n"text_classification_model_metadata\x18\x0e \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextClassificationModelMetadataH\x00\x12o\n%image_object_detection_model_metadata\x18\x14 \x01(\x0b\x32>.google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadataH\x00\x12l\n#video_classification_model_metadata\x18\x17 
\x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoClassificationModelMetadataH\x00\x12m\n$video_object_tracking_model_metadata\x18\x15 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadataH\x00\x12\x62\n\x1etext_extraction_model_metadata\x18\x13 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.TextExtractionModelMetadataH\x00\x12Q\n\x15tables_model_metadata\x18\x18 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.TablesModelMetadataH\x00\x12`\n\x1dtext_sentiment_model_metadata\x18\x16 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TextSentimentModelMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x12\n\ndataset_id\x18\x03 \x01(\t\x12/\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x10\x64\x65ployment_state\x18\x08 \x01(\x0e\x32\x32.google.cloud.automl.v1beta1.Model.DeploymentState"Q\n\x0f\x44\x65ploymentState\x12 \n\x1c\x44\x45PLOYMENT_STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x44\x45PLOYED\x10\x01\x12\x0e\n\nUNDEPLOYED\x10\x02\x42\x10\n\x0emodel_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/model.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xcc\n\n\x05Model\x12[\n\x1atranslation_model_metadata\x18\x0f \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TranslationModelMetadataH\x00\x12l\n#image_classification_model_metadata\x18\r \x01(\x0b\x32=.google.cloud.automl.v1beta1.ImageClassificationModelMetadataH\x00\x12j\n"text_classification_model_metadata\x18\x0e \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextClassificationModelMetadataH\x00\x12o\n%image_object_detection_model_metadata\x18\x14 \x01(\x0b\x32>.google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadataH\x00\x12l\n#video_classification_model_metadata\x18\x17 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoClassificationModelMetadataH\x00\x12m\n$video_object_tracking_model_metadata\x18\x15 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadataH\x00\x12\x62\n\x1etext_extraction_model_metadata\x18\x13 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.TextExtractionModelMetadataH\x00\x12Q\n\x15tables_model_metadata\x18\x18 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.TablesModelMetadataH\x00\x12`\n\x1dtext_sentiment_model_metadata\x18\x16 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TextSentimentModelMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x12\n\ndataset_id\x18\x03 \x01(\t\x12/\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x10\x64\x65ployment_state\x18\x08 
\x01(\x0e\x32\x32.google.cloud.automl.v1beta1.Model.DeploymentState"Q\n\x0f\x44\x65ploymentState\x12 \n\x1c\x44\x45PLOYMENT_STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x44\x45PLOYED\x10\x01\x12\x0e\n\nUNDEPLOYED\x10\x02:X\xea\x41U\n\x1b\x61utoml.googleapis.com/Model\x12\x36projects/{project}/locations/{location}/models/{model}B\x10\n\x0emodel_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2.DESCRIPTOR, @@ -78,8 +73,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=1550, - serialized_end=1631, + serialized_start=1577, + serialized_end=1658, ) _sym_db.RegisterEnumDescriptor(_MODEL_DEPLOYMENTSTATE) @@ -262,7 +257,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -280,7 +275,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -298,7 +293,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -364,8 +359,8 @@ ], extensions=[], nested_types=[], - enum_types=[_MODEL_DEPLOYMENTSTATE], - serialized_options=None, + enum_types=[_MODEL_DEPLOYMENTSTATE,], + serialized_options=b"\352AU\n\033automl.googleapis.com/Model\0226projects/{project}/locations/{location}/models/{model}", is_extendable=False, syntax="proto3", 
extension_ranges=[], @@ -376,10 +371,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=383, - serialized_end=1649, + serialized_start=410, + serialized_end=1766, ) _MODEL.fields_by_name[ @@ -495,10 +490,10 @@ Model = _reflection.GeneratedProtocolMessageType( "Model", (_message.Message,), - dict( - DESCRIPTOR=_MODEL, - __module__="google.cloud.automl_v1beta1.proto.model_pb2", - __doc__="""API proto representing a trained machine learning model. + { + "DESCRIPTOR": _MODEL, + "__module__": "google.cloud.automl_v1beta1.proto.model_pb2", + "__doc__": """API proto representing a trained machine learning model. Attributes: @@ -530,7 +525,7 @@ display_name: Required. The name of the model to show in the interface. The name can be up to 32 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscores (\_), and ASCII + ASCII Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9. It must start with a letter. dataset_id: Required. The resource ID of the dataset used to create the @@ -546,10 +541,11 @@ serve prediction requests after it gets deployed. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Model) - ), + }, ) _sym_db.RegisterMessage(Model) DESCRIPTOR._options = None +_MODEL._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/operations.proto b/google/cloud/automl_v1beta1/proto/operations.proto index 460321cc..cce3fedc 100644 --- a/google/cloud/automl_v1beta1/proto/operations.proto +++ b/google/cloud/automl_v1beta1/proto/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,16 +11,18 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/io.proto"; +import "google/cloud/automl/v1beta1/model.proto"; +import "google/cloud/automl/v1beta1/model_evaluation.proto"; +import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -58,8 +60,7 @@ message OperationMetadata { ExportModelOperationMetadata export_model_details = 22; // Details of ExportEvaluatedExamples operation. - ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = - 26; + ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = 26; } // Output only. Progress of operation. Range: [0, 100]. @@ -80,19 +81,29 @@ message OperationMetadata { } // Details of operations that perform deletes of any entities. -message DeleteOperationMetadata {} +message DeleteOperationMetadata { + +} // Details of DeployModel operation. -message DeployModelOperationMetadata {} +message DeployModelOperationMetadata { + +} // Details of UndeployModel operation. -message UndeployModelOperationMetadata {} +message UndeployModelOperationMetadata { + +} // Details of CreateModel operation. -message CreateModelOperationMetadata {} +message CreateModelOperationMetadata { + +} // Details of ImportData operation. -message ImportDataOperationMetadata {} +message ImportDataOperationMetadata { + +} // Details of ExportData operation. 
message ExportDataOperationMetadata { diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py index d1b13233..10b90dcd 100644 --- a/google/cloud/automl_v1beta1/proto/operations_pb2.py +++ b/google/cloud/automl_v1beta1/proto/operations_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/operations.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,29 +12,35 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, ) +from google.cloud.automl_v1beta1.proto import ( + model_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2, +) +from google.cloud.automl_v1beta1.proto import ( + model_evaluation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/operations.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - 
'\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 
\x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" 
\n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 \x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.DESCRIPTOR, + 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.DESCRIPTOR, + google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, google_dot_rpc_dot_status__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -298,10 +301,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=216, - serialized_end=1251, + serialized_start=350, + serialized_end=1385, ) @@ -320,8 +323,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1253, - serialized_end=1278, + serialized_start=1387, + serialized_end=1412, ) @@ -340,8 +343,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1280, - serialized_end=1310, + serialized_start=1414, + serialized_end=1444, ) @@ -360,8 +363,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1312, - serialized_end=1344, + serialized_start=1446, + serialized_end=1478, ) @@ -380,8 +383,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1346, - serialized_end=1376, + serialized_start=1480, + serialized_end=1510, ) @@ -400,8 +403,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1378, - serialized_end=1407, + serialized_start=1512, + serialized_end=1541, ) @@ -421,7 +424,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -439,7 +442,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -463,10 +466,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=1541, - serialized_end=1649, + serialized_start=1675, + serialized_end=1783, ) _EXPORTDATAOPERATIONMETADATA = _descriptor.Descriptor( @@ -493,18 +496,18 @@ extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO], + nested_types=[_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1410, - serialized_end=1649, + serialized_start=1544, + serialized_end=1783, ) @@ -524,7 +527,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -542,7 +545,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -566,10 +569,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=1865, - serialized_end=1975, + serialized_start=1999, + serialized_end=2109, ) _BATCHPREDICTOPERATIONMETADATA = _descriptor.Descriptor( @@ -617,15 +620,15 @@ ), ], extensions=[], - nested_types=[_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO], + nested_types=[_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1652, - serialized_end=1975, + serialized_start=1786, + serialized_end=2109, ) @@ -645,7 +648,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -653,7 +656,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -663,8 +666,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2112, - serialized_end=2165, + serialized_start=2246, + serialized_end=2299, ) _EXPORTMODELOPERATIONMETADATA = _descriptor.Descriptor( @@ -691,18 +694,18 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO], + nested_types=[_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1978, - serialized_end=2165, + serialized_start=2112, + serialized_end=2299, ) @@ -722,7 +725,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -730,7 +733,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -740,8 +743,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2338, - serialized_end=2406, + serialized_start=2472, + serialized_end=2540, ) _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA = _descriptor.Descriptor( @@ -768,11 +771,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[ - _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO + _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, ], enum_types=[], serialized_options=None, @@ -780,8 +783,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2168, - serialized_end=2406, + serialized_start=2302, + serialized_end=2540, ) _OPERATIONMETADATA.fields_by_name[ @@ -984,10 +987,10 @@ OperationMetadata = _reflection.GeneratedProtocolMessageType( "OperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_OPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Metadata used across all long running operations returned + { + "DESCRIPTOR": _OPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Metadata used across all long running 
operations returned by AutoML API. @@ -1019,7 +1022,7 @@ currently. partial_failures: Output only. Partial failures encountered. E.g. single files - that couldn't be read. This field should never exceed 20 + that couldn’t be read. This field should never exceed 20 entries. Status details field will contain standard GCP error details. create_time: @@ -1029,92 +1032,92 @@ time. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.OperationMetadata) - ), + }, ) _sym_db.RegisterMessage(OperationMetadata) DeleteOperationMetadata = _reflection.GeneratedProtocolMessageType( "DeleteOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_DELETEOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of operations that perform deletes of any + { + "DESCRIPTOR": _DELETEOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of operations that perform deletes of any entities. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(DeleteOperationMetadata) DeployModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "DeployModelOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_DEPLOYMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of DeployModel operation. + { + "DESCRIPTOR": _DEPLOYMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of DeployModel operation. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(DeployModelOperationMetadata) UndeployModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "UndeployModelOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_UNDEPLOYMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of UndeployModel operation. + { + "DESCRIPTOR": _UNDEPLOYMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of UndeployModel operation. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(UndeployModelOperationMetadata) CreateModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "CreateModelOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_CREATEMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of CreateModel operation. + { + "DESCRIPTOR": _CREATEMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of CreateModel operation. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(CreateModelOperationMetadata) ImportDataOperationMetadata = _reflection.GeneratedProtocolMessageType( "ImportDataOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDATAOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of ImportData operation. + { + "DESCRIPTOR": _IMPORTDATAOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of ImportData operation. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ImportDataOperationMetadata) ExportDataOperationMetadata = _reflection.GeneratedProtocolMessageType( "ExportDataOperationMetadata", (_message.Message,), - dict( - ExportDataOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "ExportDataOutputInfo": _reflection.GeneratedProtocolMessageType( "ExportDataOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes this export data's output. Supplements + { + "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes this export data’s output. Supplements [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. @@ -1130,20 +1133,20 @@ data is written. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo) - ), + }, ), - DESCRIPTOR=_EXPORTDATAOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of ExportData operation. + "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of ExportData operation. Attributes: output_info: - Output only. Information further describing this export data's + Output only. Information further describing this export data’s output. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ExportDataOperationMetadata) _sym_db.RegisterMessage(ExportDataOperationMetadata.ExportDataOutputInfo) @@ -1151,14 +1154,14 @@ BatchPredictOperationMetadata = _reflection.GeneratedProtocolMessageType( "BatchPredictOperationMetadata", (_message.Message,), - dict( - BatchPredictOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "BatchPredictOutputInfo": _reflection.GeneratedProtocolMessageType( "BatchPredictOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes this batch predict's output. Supplements + { + "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes this batch predict’s output. Supplements [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. @@ -1175,11 +1178,11 @@ output is written. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo) - ), + }, ), - DESCRIPTOR=_BATCHPREDICTOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of BatchPredict operation. + "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of BatchPredict operation. Attributes: @@ -1188,10 +1191,10 @@ this batch predict operation. output_info: Output only. Information further describing this batch - predict's output. + predict’s output. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(BatchPredictOperationMetadata) _sym_db.RegisterMessage(BatchPredictOperationMetadata.BatchPredictOutputInfo) @@ -1199,14 +1202,14 @@ ExportModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "ExportModelOperationMetadata", (_message.Message,), - dict( - ExportModelOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "ExportModelOutputInfo": _reflection.GeneratedProtocolMessageType( "ExportModelOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes the output of model export. Supplements + { + "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes the output of model export. Supplements [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. @@ -1217,11 +1220,11 @@ into which the model will be exported. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo) - ), + }, ), - DESCRIPTOR=_EXPORTMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of ExportModel operation. + "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of ExportModel operation. Attributes: @@ -1230,7 +1233,7 @@ model export. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ExportModelOperationMetadata) _sym_db.RegisterMessage(ExportModelOperationMetadata.ExportModelOutputInfo) @@ -1238,14 +1241,14 @@ ExportEvaluatedExamplesOperationMetadata = _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesOperationMetadata", (_message.Message,), - dict( - ExportEvaluatedExamplesOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "ExportEvaluatedExamplesOutputInfo": _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes the output of the evaluated examples + { + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes the output of the evaluated examples export. Supplements [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. @@ -1258,11 +1261,11 @@ export evaluated examples is written. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo) - ), + }, ), - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of EvaluatedExamples operation. + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of EvaluatedExamples operation. Attributes: @@ -1271,7 +1274,7 @@ evaluated examples export. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ExportEvaluatedExamplesOperationMetadata) _sym_db.RegisterMessage( diff --git a/google/cloud/automl_v1beta1/proto/prediction_service.proto b/google/cloud/automl_v1beta1/proto/prediction_service.proto index 57f1b794..0bcf685e 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service.proto +++ b/google/cloud/automl_v1beta1/proto/prediction_service.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -19,6 +18,8 @@ package google.cloud.automl.v1beta1; import "google/api/annotations.proto"; import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/annotation_payload.proto"; import "google/cloud/automl/v1beta1/data_items.proto"; import "google/cloud/automl/v1beta1/io.proto"; @@ -38,8 +39,7 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // snake_case or kebab-case, either of those cases is accepted. service PredictionService { option (google.api.default_host) = "automl.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; // Perform an online prediction. The prediction result will be directly // returned in the response. 
@@ -65,16 +65,15 @@ service PredictionService { post: "/v1beta1/{name=projects/*/locations/*/models/*}:predict" body: "*" }; + option (google.api.method_signature) = "name,payload,params"; } - // Perform a batch prediction. Unlike the online - // [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch + // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch // prediction result won't be immediately available in the response. Instead, // a long running operation object is returned. User can poll the operation // result via [GetOperation][google.longrunning.Operations.GetOperation] - // method. Once the operation is done, - // [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is - // returned in the [response][google.longrunning.Operation.response] field. + // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is returned in + // the [response][google.longrunning.Operation.response] field. // Available for following ML problems: // * Image Classification // * Image Object Detection @@ -86,18 +85,27 @@ service PredictionService { post: "/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict" body: "*" }; + option (google.api.method_signature) = "name,input_config,output_config,params"; + option (google.longrunning.operation_info) = { + response_type: "BatchPredictResult" + metadata_type: "OperationMetadata" + }; } } -// Request message for -// [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. +// Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. message PredictRequest { - // Name of the model requested to serve the prediction. - string name = 1; + // Required. Name of the model requested to serve the prediction. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. Payload to perform a prediction on. The payload must match the // problem type that the model was trained to solve. - ExamplePayload payload = 2; + ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED]; // Additional domain-specific parameters, any string must be up to 25000 // characters long. @@ -116,18 +124,13 @@ message PredictRequest { // boxes will be returned in the response. Default is 100, the // requested value may be limited by server. // * For Tables: - // `feature_importance` - (boolean) Whether - // - // [feature_importance][[google.cloud.automl.v1beta1.TablesModelColumnInfo.feature_importance] - // should be populated in the returned - // - // [TablesAnnotation(-s)][[google.cloud.automl.v1beta1.TablesAnnotation]. + // feature_importance - (boolean) Whether feature importance + // should be populated in the returned TablesAnnotation. // The default is false. map params = 3; } -// Response message for -// [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. +// Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. message PredictResponse { // Prediction result. // Translation and Text Sentiment will return precisely one payload. @@ -158,20 +161,24 @@ message PredictResponse { map metadata = 2; } -// Request message for -// [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. +// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. message BatchPredictRequest { - // Name of the model requested to serve the batch prediction. - string name = 1; + // Required. Name of the model requested to serve the batch prediction. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. The input configuration for batch prediction. - BatchPredictInputConfig input_config = 3; + BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The Configuration specifying where output predictions should // be written. - BatchPredictOutputConfig output_config = 4; + BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED]; - // Additional domain-specific parameters for the predictions, any string must + // Required. Additional domain-specific parameters for the predictions, any string must // be up to 25000 characters long. // // * For Text Classification: @@ -196,6 +203,7 @@ message BatchPredictRequest { // requested value may be limited by server. // // * For Video Classification : + // // `score_threshold` - (float) A value from 0.0 to 1.0. When the model // makes predictions for a video, it will only produce results that // have at least this confidence score. The default is 0.5. @@ -223,7 +231,14 @@ message BatchPredictRequest { // metrics provided to describe that quality. The default is // "false". // + // * For Tables: + // + // feature_importance - (boolean) Whether feature importance + // should be populated in the returned TablesAnnotations. The + // default is false. + // // * For Video Object Tracking: + // // `score_threshold` - (float) When Model detects objects on video frames, // it will only produce bounding boxes which have at least this // confidence score. Value in 0 to 1 range, default is 0.5. @@ -233,14 +248,12 @@ message BatchPredictRequest { // `min_bounding_box_size` - (float) Only bounding boxes with shortest edge // at least that long as a relative value of video frame size will be // returned. Value in 0 to 1 range. Default is 0. 
- // - map params = 5; + map params = 5 [(google.api.field_behavior) = REQUIRED]; } // Result of the Batch Predict. This message is returned in // [response][google.longrunning.Operation.response] of the operation returned -// by the -// [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. +// by the [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. message BatchPredictResult { // Additional domain-specific prediction response metadata. // diff --git a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py index 751f16ef..b27a20a8 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/prediction_service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -17,6 +14,8 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( annotation_payload_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2, ) @@ -38,15 +37,13 @@ name="google/cloud/automl_v1beta1/proto/prediction_service.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - 
"\n\037com.google.cloud.automl.v1beta1B\026PredictionServiceProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n:google/cloud/automl_v1beta1/proto/prediction_service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a#google/longrunning/operations.proto"\xd4\x01\n\x0ePredictRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12<\n\x07payload\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayload\x12G\n\x06params\x18\x03 \x03(\x0b\x32\x37.google.cloud.automl.v1beta1.PredictRequest.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x02\n\x0fPredictResponse\x12?\n\x07payload\x18\x01 \x03(\x0b\x32..google.cloud.automl.v1beta1.AnnotationPayload\x12G\n\x12preprocessed_input\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayload\x12L\n\x08metadata\x18\x02 \x03(\x0b\x32:.google.cloud.automl.v1beta1.PredictResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xba\x02\n\x13\x42\x61tchPredictRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0cinput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12L\n\routput_config\x18\x04 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.BatchPredictOutputConfig\x12L\n\x06params\x18\x05 \x03(\x0b\x32<.google.cloud.automl.v1beta1.BatchPredictRequest.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x96\x01\n\x12\x42\x61tchPredictResult\x12O\n\x08metadata\x18\x01 
\x03(\x0b\x32=.google.cloud.automl.v1beta1.BatchPredictResult.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x32\xb4\x03\n\x11PredictionService\x12\xa8\x01\n\x07Predict\x12+.google.cloud.automl.v1beta1.PredictRequest\x1a,.google.cloud.automl.v1beta1.PredictResponse"B\x82\xd3\xe4\x93\x02<"7/v1beta1/{name=projects/*/locations/*/models/*}:predict:\x01*\x12\xa8\x01\n\x0c\x42\x61tchPredict\x12\x30.google.cloud.automl.v1beta1.BatchPredictRequest\x1a\x1d.google.longrunning.Operation"G\x82\xd3\xe4\x93\x02\x41"\n\x0cinput_config\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig"c\n\x11\x45xportDataRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\routput_config\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.OutputConfig"(\n\x18GetAnnotationSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"S\n\x13GetTableSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x8e\x01\n\x15ListTableSpecsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"n\n\x16ListTableSpecsResponse\x12;\n\x0btable_specs\x18\x01 \x03(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x85\x01\n\x16UpdateTableSpecRequest\x12:\n\ntable_spec\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"T\n\x14GetColumnSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x8f\x01\n\x16ListColumnSpecsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 
\x01(\t"q\n\x17ListColumnSpecsResponse\x12=\n\x0c\x63olumn_specs\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x88\x01\n\x17UpdateColumnSpecRequest\x12<\n\x0b\x63olumn_spec\x18\x01 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"W\n\x12\x43reateModelRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32".google.cloud.automl.v1beta1.Model"\x1f\n\x0fGetModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"Z\n\x11ListModelsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"`\n\x12ListModelsResponse\x12\x31\n\x05model\x18\x01 \x03(\x0b\x32".google.cloud.automl.v1beta1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t""\n\x12\x44\x65leteModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xca\x02\n\x12\x44\x65ployModelRequest\x12\x84\x01\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32H.google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12\x81\x01\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32G.google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\tB\x1b\n\x19model_deployment_metadata"$\n\x14UndeployModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"o\n\x12\x45xportModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12K\n\routput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.ModelExportOutputConfig"\x87\x01\n\x1e\x45xportEvaluatedExamplesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12W\n\routput_config\x18\x03 \x01(\x0b\x32@.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig")\n\x19GetModelEvaluationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"d\n\x1bListModelEvaluationsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 
\x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"\x7f\n\x1cListModelEvaluationsResponse\x12\x46\n\x10model_evaluation\x18\x01 \x03(\x0b\x32,.google.cloud.automl.v1beta1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xd9"\n\x06\x41utoMl\x12\xac\x01\n\rCreateDataset\x12\x31.google.cloud.automl.v1beta1.CreateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"B\x82\xd3\xe4\x93\x02<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\x12\x9d\x01\n\nGetDataset\x12..google.cloud.automl.v1beta1.GetDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{name=projects/*/locations/*/datasets/*}\x12\xae\x01\n\x0cListDatasets\x12\x30.google.cloud.automl.v1beta1.ListDatasetsRequest\x1a\x31.google.cloud.automl.v1beta1.ListDatasetsResponse"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{parent=projects/*/locations/*}/datasets\x12\xb4\x01\n\rUpdateDataset\x12\x31.google.cloud.automl.v1beta1.UpdateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"J\x82\xd3\xe4\x93\x02\x44\x32\x39/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\x12\x9c\x01\n\rDeleteDataset\x12\x31.google.cloud.automl.v1beta1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"9\x82\xd3\xe4\x93\x02\x33*1/v1beta1/{name=projects/*/locations/*/datasets/*}\x12\xa4\x01\n\nImportData\x12..google.cloud.automl.v1beta1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"G\x82\xd3\xe4\x93\x02\x41"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\x12\xc1\x01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"F\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\x12\xcd\x01\n\x0fUpdateTableSpec\x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"]\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=proj
ects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\x12\xc1\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"T\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\x12\xd2\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"T\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\x12\xe0\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"m\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\x12\x9d\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\x12\x95\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model"7\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\x12\xa6\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"7\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\x12\x96\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31*//v1beta1/{name=projects/*/locations/*/models/*}\x12\xa0\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"A\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\x01*\x12\xa6\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"C\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projec
ts/*/locations/*/models/*}:undeploy:\x01*\x12\xa0\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"A\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\x12\xc9\x01\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"R\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\x12\xc6\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\x12\xd7\x01\n\x14ListModelEvaluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\013AutoMlProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n/google/cloud/automl_v1beta1/proto/service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a/google/cloud/automl_v1beta1/proto/dataset.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a\x32google/cloud/automl_v1beta1/proto/table_spec.proto\x1a#google/longrunning/operations.proto\x1a google/protobuf/field_mask.proto"\x8d\x01\n\x14\x43reateDatasetRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12:\n\x07\x64\x61taset\x18\x02 \x01(\x0b\x32$.google.cloud.automl.v1beta1.DatasetB\x03\xe0\x41\x02"H\n\x11GetDatasetRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset"\x87\x01\n\x13ListDatasetsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"g\n\x14ListDatasetsResponse\x12\x36\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32$.google.cloud.automl.v1beta1.Dataset\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x83\x01\n\x14UpdateDatasetRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32$.google.cloud.automl.v1beta1.DatasetB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"K\n\x14\x44\x65leteDatasetRequest\x12\x33\n\x04name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset"\x8d\x01\n\x11ImportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12\x43\n\x0cinput_config\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigB\x03\xe0\x41\x02"\x8f\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12\x45\n\routput_config\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"|\n\x13GetTableSpecRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x61utoml.googleapis.com/TableSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb5\x01\n\x15ListTableSpecsRequest\x12\x35\n\x06parent\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"n\n\x16ListTableSpecsResponse\x12;\n\x0btable_specs\x18\x01 \x03(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8a\x01\n\x16UpdateTableSpecRequest\x12?\n\ntable_spec\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.TableSpecB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"~\n\x14GetColumnSpecRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n automl.googleapis.com/ColumnSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb8\x01\n\x16ListColumnSpecsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x61utoml.googleapis.com/TableSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 
\x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"q\n\x17ListColumnSpecsResponse\x12=\n\x0c\x63olumn_specs\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8d\x01\n\x17UpdateColumnSpecRequest\x12\x41\n\x0b\x63olumn_spec\x18\x01 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpecB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x87\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x36\n\x05model\x18\x04 \x01(\x0b\x32".google.cloud.automl.v1beta1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"`\n\x12ListModelsResponse\x12\x31\n\x05model\x18\x01 \x03(\x0b\x32".google.cloud.automl.v1beta1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\xef\x02\n\x12\x44\x65ployModelRequest\x12\x84\x01\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32H.google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12\x81\x01\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32G.google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 
\x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x99\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12P\n\routput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.ModelExportOutputConfigB\x03\xe0\x41\x02"\xb1\x01\n\x1e\x45xportEvaluatedExamplesRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\\\n\routput_config\x18\x03 \x01(\x0b\x32@.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x89\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"\x7f\n\x1cListModelEvaluationsResponse\x12\x46\n\x10model_evaluation\x18\x01 \x03(\x0b\x32,.google.cloud.automl.v1beta1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xed\'\n\x06\x41utoMl\x12\xbd\x01\n\rCreateDataset\x12\x31.google.cloud.automl.v1beta1.CreateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"S\x82\xd3\xe4\x93\x02<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\x12\xa4\x01\n\nGetDataset\x12..google.cloud.automl.v1beta1.GetDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"@\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xb7\x01\n\x0cListDatasets\x12\x30.google.cloud.automl.v1beta1.ListDatasetsRequest\x1a\x31.google.cloud.automl.v1beta1.ListDatasetsResponse"B\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbe\x01\n\rUpdateDataset\x12\x31.google.cloud.automl.v1beta1.UpdateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"T\x82\xd3\xe4\x93\x02\x44\x32\x39/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x07\x64\x61taset\x12\xd0\x01\n\rDeleteDataset\x12\x31.google.cloud.automl.v1beta1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x33*1/v1beta1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe6\x01\n\nImportData\x12..google.cloud.automl.v1beta1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x88\x01\x82\xd3\xe4\x93\x02\x41"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\xda\x41\x04name\x12\xca\x01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"O\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\xda\x41\x06parent\x12\xda\x01\n\x0fUpdateTableSpec\x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"j\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_sp
ec\xda\x41\ntable_spec\x12\xc8\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"[\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\xda\x41\x04name\x12\xdb\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"]\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\xda\x41\x06parent\x12\xee\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"{\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\xda\x41\x0b\x63olumn_spec\x12\xc9\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"j\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x9c\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model">\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xaf\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xca\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x31*//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd4\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"u\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=proje
cts/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xda\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"w\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe3\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\x8c\x02\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"\x94\x01\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xcd\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"Q\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xe0\x01\n\x14ListModelEvaluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"S\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\x06parent\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, + 
google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR, @@ -77,7 +75,6 @@ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, ], ) @@ -98,13 +95,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -122,7 +119,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -134,8 +131,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=725, - serialized_end=818, + serialized_start=786, + serialized_end=927, ) @@ -155,15 +152,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -173,8 +170,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=820, - serialized_end=853, + serialized_start=929, + serialized_end=1001, ) @@ -194,13 +191,13 @@ cpp_type=9, label=1, 
has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -212,7 +209,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -248,7 +245,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -266,8 +263,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=855, - serialized_end=947, + serialized_start=1004, + serialized_end=1139, ) @@ -305,7 +302,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -323,8 +320,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=949, - serialized_end=1052, + serialized_start=1141, + serialized_end=1244, ) @@ -350,7 +347,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -380,8 +377,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1054, - serialized_end=1180, + serialized_start=1247, + serialized_end=1378, ) @@ -401,15 +398,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", 
file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -419,8 +416,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1182, - serialized_end=1218, + serialized_start=1380, + serialized_end=1455, ) @@ -440,13 +437,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -464,7 +461,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -476,8 +473,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1220, - serialized_end=1317, + serialized_start=1458, + serialized_end=1599, ) @@ -497,13 +494,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -521,7 +518,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -533,8 +530,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1319, - serialized_end=1418, + serialized_start=1602, + serialized_end=1745, ) @@ -554,15 +551,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + 
serialized_options=b"\340A\002\372A&\n$automl.googleapis.com/AnnotationSpec", file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -572,8 +569,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1420, - serialized_end=1460, + serialized_start=1747, + serialized_end=1833, ) @@ -593,13 +590,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -629,8 +626,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1462, - serialized_end=1545, + serialized_start=1835, + serialized_end=1959, ) @@ -650,13 +647,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -686,7 +683,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -722,7 +719,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -740,8 +737,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1548, - serialized_end=1690, + serialized_start=1962, + serialized_end=2143, ) @@ -779,7 +776,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, 
enum_type=None, containing_type=None, @@ -797,8 +794,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1692, - serialized_end=1802, + serialized_start=2145, + serialized_end=2255, ) @@ -824,7 +821,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -854,8 +851,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1805, - serialized_end=1938, + serialized_start=2258, + serialized_end=2396, ) @@ -875,13 +872,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n automl.googleapis.com/ColumnSpec', file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -911,8 +908,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1940, - serialized_end=2024, + serialized_start=2398, + serialized_end=2524, ) @@ -932,13 +929,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -968,7 +965,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1004,7 +1001,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1022,8 +1019,8 @@ syntax="proto3", extension_ranges=[], 
oneofs=[], - serialized_start=2027, - serialized_end=2170, + serialized_start=2527, + serialized_end=2711, ) @@ -1061,7 +1058,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1079,8 +1076,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2172, - serialized_end=2285, + serialized_start=2713, + serialized_end=2826, ) @@ -1106,7 +1103,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1136,8 +1133,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2288, - serialized_end=2424, + serialized_start=2829, + serialized_end=2970, ) @@ -1157,13 +1154,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1181,7 +1178,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -1193,8 +1190,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2426, - serialized_end=2513, + serialized_start=2973, + serialized_end=3108, ) @@ -1214,15 +1211,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ 
-1232,8 +1229,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2515, - serialized_end=2546, + serialized_start=3110, + serialized_end=3178, ) @@ -1253,13 +1250,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1271,7 +1268,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1307,7 +1304,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1325,8 +1322,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2548, - serialized_end=2638, + serialized_start=3181, + serialized_end=3314, ) @@ -1364,7 +1361,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1382,8 +1379,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2640, - serialized_end=2736, + serialized_start=3316, + serialized_end=3412, ) @@ -1403,15 +1400,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1421,8 +1418,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - 
serialized_start=2738, - serialized_end=2772, + serialized_start=3414, + serialized_end=3485, ) @@ -1478,13 +1475,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, ), ], @@ -1502,10 +1499,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=2775, - serialized_end=3105, + serialized_start=3488, + serialized_end=3855, ) @@ -1525,15 +1522,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1543,8 +1540,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3107, - serialized_end=3143, + serialized_start=3857, + serialized_end=3930, ) @@ -1564,13 +1561,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1588,7 +1585,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -1600,8 +1597,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3145, - serialized_end=3256, + serialized_start=3933, + serialized_end=4086, ) @@ -1621,13 +1618,13 @@ cpp_type=9, 
label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1645,7 +1642,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -1657,8 +1654,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3259, - serialized_end=3394, + serialized_start=4089, + serialized_end=4266, ) @@ -1678,15 +1675,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A'\n%automl.googleapis.com/ModelEvaluation", file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1696,8 +1693,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3396, - serialized_end=3437, + serialized_start=4268, + serialized_end=4356, ) @@ -1717,13 +1714,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1735,7 +1732,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1771,7 +1768,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + 
default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1789,8 +1786,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3439, - serialized_end=3539, + serialized_start=4359, + serialized_end=4496, ) @@ -1828,7 +1825,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1846,8 +1843,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3541, - serialized_end=3668, + serialized_start=4498, + serialized_end=4625, ) _CREATEDATASETREQUEST.fields_by_name[ @@ -2001,85 +1998,87 @@ CreateDatasetRequest = _reflection.GeneratedProtocolMessageType( "CreateDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _CREATEDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. Attributes: parent: - The resource name of the project to create the dataset for. + Required. The resource name of the project to create the + dataset for. dataset: - The dataset to create. + Required. The dataset to create. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(CreateDatasetRequest) GetDatasetRequest = _reflection.GeneratedProtocolMessageType( "GetDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. 
Attributes: name: - The resource name of the dataset to retrieve. + Required. The resource name of the dataset to retrieve. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(GetDatasetRequest) ListDatasetsRequest = _reflection.GeneratedProtocolMessageType( "ListDatasetsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTDATASETSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTDATASETSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. Attributes: parent: - The resource name of the project from which to list datasets. + Required. The resource name of the project from which to list + datasets. filter: An expression for filtering the results of the request. - - ``dataset_metadata`` - for existence of the case (e.g. - image\_classification\_dataset\_metadata:\*). Some examples of - using the filter are: - - ``translation_dataset_metadata:*`` --> The dataset has - translation\_dataset\_metadata. + ``dataset_metadata`` - for existence of the case ( + e.g. image_classification_dataset_metadata:*). Some examples + of using the filter are: - + ``translation_dataset_metadata:*`` –> The dataset has + translation_dataset_metadata. page_size: Requested page size. Server may return fewer results than requested. If unspecified, server will pick a default size. page_token: A token identifying a page of results for the server to return - Typically obtained via [ListDatasetsResponse.next\_page\_token - ][google.cloud.automl.v1beta1.ListDatasetsResponse.next\_page\ - _token] of the previous [AutoMl.ListDatasets][google.cloud.aut - oml.v1beta1.AutoMl.ListDatasets] call. 
+ Typically obtained via [ListDatasetsResponse.next_page_token][ + google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_tok + en] of the previous [AutoMl.ListDatasets][google.cloud.automl. + v1beta1.AutoMl.ListDatasets] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsRequest) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsRequest) ListDatasetsResponse = _reflection.GeneratedProtocolMessageType( "ListDatasetsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTDATASETSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTDATASETSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. @@ -2088,61 +2087,62 @@ The datasets read. next_page_token: A token to retrieve next page of results. Pass to [ListDataset - sRequest.page\_token][google.cloud.automl.v1beta1.ListDatasets - Request.page\_token] to obtain that page. + sRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsR + equest.page_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsResponse) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsResponse) UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType( "UpdateDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATEDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] Attributes: dataset: - The dataset which replaces the resource on the server. + Required. The dataset which replaces the resource on the + server. 
update_mask: The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateDatasetRequest) DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType( "DeleteDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. Attributes: name: - The resource name of the dataset to delete. + Required. The resource name of the dataset to delete. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteDatasetRequest) ImportDataRequest = _reflection.GeneratedProtocolMessageType( "ImportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDATAREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _IMPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. @@ -2155,17 +2155,17 @@ semantics, if any. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ImportDataRequest) ExportDataRequest = _reflection.GeneratedProtocolMessageType( "ExportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDATAREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. @@ -2176,63 +2176,65 @@ Required. The desired output location. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ExportDataRequest) GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType( "GetAnnotationSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETANNOTATIONSPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. Attributes: name: - The resource name of the annotation spec to retrieve. + Required. The resource name of the annotation spec to + retrieve. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetAnnotationSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetAnnotationSpecRequest) GetTableSpecRequest = _reflection.GeneratedProtocolMessageType( "GetTableSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETTABLESPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETTABLESPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. Attributes: name: - The resource name of the table spec to retrieve. + Required. The resource name of the table spec to retrieve. field_mask: Mask specifying which fields to read. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetTableSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetTableSpecRequest) ListTableSpecsRequest = _reflection.GeneratedProtocolMessageType( "ListTableSpecsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESPECSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTTABLESPECSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. Attributes: parent: - The resource name of the dataset to list table specs from. + Required. The resource name of the dataset to list table specs + from. field_mask: Mask specifying which fields to read. filter: @@ -2244,23 +2246,22 @@ page_token: A token identifying a page of results for the server to return. Typically obtained from the [ListTableSpecsResponse.ne - xt\_page\_token][google.cloud.automl.v1beta1.ListTableSpecsRes - ponse.next\_page\_token] field of the previous [AutoMl.ListTab - leSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] - call. 
+ xt_page_token][google.cloud.automl.v1beta1.ListTableSpecsRespo + nse.next_page_token] field of the previous [AutoMl.ListTableSp + ecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsRequest) - ), + }, ) _sym_db.RegisterMessage(ListTableSpecsRequest) ListTableSpecsResponse = _reflection.GeneratedProtocolMessageType( "ListTableSpecsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESPECSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTTABLESPECSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. @@ -2269,69 +2270,71 @@ The table specs read. next_page_token: A token to retrieve next page of results. Pass to [ListTableSp - ecsRequest.page\_token][google.cloud.automl.v1beta1.ListTableS - pecsRequest.page\_token] to obtain that page. + ecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSp + ecsRequest.page_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsResponse) - ), + }, ) _sym_db.RegisterMessage(ListTableSpecsResponse) UpdateTableSpecRequest = _reflection.GeneratedProtocolMessageType( "UpdateTableSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATETABLESPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATETABLESPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] Attributes: table_spec: - The table spec which replaces the resource on the server. + Required. The table spec which replaces the resource on the + server. 
update_mask: The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateTableSpecRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateTableSpecRequest) GetColumnSpecRequest = _reflection.GeneratedProtocolMessageType( "GetColumnSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETCOLUMNSPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETCOLUMNSPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. Attributes: name: - The resource name of the column spec to retrieve. + Required. The resource name of the column spec to retrieve. field_mask: Mask specifying which fields to read. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetColumnSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetColumnSpecRequest) ListColumnSpecsRequest = _reflection.GeneratedProtocolMessageType( "ListColumnSpecsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTCOLUMNSPECSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTCOLUMNSPECSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. Attributes: parent: - The resource name of the table spec to list column specs from. + Required. The resource name of the table spec to list column + specs from. field_mask: Mask specifying which fields to read. filter: @@ -2343,23 +2346,23 @@ page_token: A token identifying a page of results for the server to return. 
Typically obtained from the [ListColumnSpecsResponse.n - ext\_page\_token][google.cloud.automl.v1beta1.ListColumnSpecsR - esponse.next\_page\_token] field of the previous [AutoMl.ListC - olumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs - ] call. + ext_page_token][google.cloud.automl.v1beta1.ListColumnSpecsRes + ponse.next_page_token] field of the previous [AutoMl.ListColum + nSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] + call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsRequest) - ), + }, ) _sym_db.RegisterMessage(ListColumnSpecsRequest) ListColumnSpecsResponse = _reflection.GeneratedProtocolMessageType( "ListColumnSpecsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTCOLUMNSPECSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTCOLUMNSPECSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. @@ -2368,118 +2371,120 @@ The column specs read. next_page_token: A token to retrieve next page of results. Pass to [ListColumnS - pecsRequest.page\_token][google.cloud.automl.v1beta1.ListColum - nSpecsRequest.page\_token] to obtain that page. + pecsRequest.page_token][google.cloud.automl.v1beta1.ListColumn + SpecsRequest.page_token] to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsResponse) - ), + }, ) _sym_db.RegisterMessage(ListColumnSpecsResponse) UpdateColumnSpecRequest = _reflection.GeneratedProtocolMessageType( "UpdateColumnSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATECOLUMNSPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATECOLUMNSPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] Attributes: column_spec: - The column spec which replaces the resource on the server. + Required. The column spec which replaces the resource on the + server. update_mask: The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateColumnSpecRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateColumnSpecRequest) CreateModelRequest = _reflection.GeneratedProtocolMessageType( "CreateModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _CREATEMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. Attributes: parent: - Resource name of the parent project where the model is being - created. + Required. Resource name of the parent project where the model + is being created. model: - The model to create. + Required. The model to create. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelRequest) - ), + }, ) _sym_db.RegisterMessage(CreateModelRequest) GetModelRequest = _reflection.GeneratedProtocolMessageType( "GetModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. Attributes: name: - Resource name of the model. + Required. Resource name of the model. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelRequest) ListModelsRequest = _reflection.GeneratedProtocolMessageType( "ListModelsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTMODELSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. Attributes: parent: - Resource name of the project, from which to list the models. + Required. Resource name of the project, from which to list the + models. filter: An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case (e.g. - video\_classification\_model\_metadata:\*). - ``dataset_id`` + ``model_metadata`` - for existence of the case ( + e.g. video_classification_model_metadata:*). - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. - ``dataset_id=5`` - --> The model was created from a dataset with ID 5. 
+ ``image_classification_model_metadata:*`` –> The model has + image_classification_model_metadata. - ``dataset_id=5`` –> + The model was created from a dataset with ID 5. page_size: Requested page size. page_token: A token identifying a page of results for the server to return - Typically obtained via [ListModelsResponse.next\_page\_token][ - google.cloud.automl.v1beta1.ListModelsResponse.next\_page\_tok - en] of the previous [AutoMl.ListModels][google.cloud.automl.v1 - beta1.AutoMl.ListModels] call. + Typically obtained via [ListModelsResponse.next_page_token][go + ogle.cloud.automl.v1beta1.ListModelsResponse.next_page_token] + of the previous [AutoMl.ListModels][google.cloud.automl.v1beta + 1.AutoMl.ListModels] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelsRequest) ListModelsResponse = _reflection.GeneratedProtocolMessageType( "ListModelsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTMODELSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. @@ -2488,40 +2493,40 @@ List of models in the requested page. next_page_token: A token to retrieve next page of results. Pass to [ListModelsR - equest.page\_token][google.cloud.automl.v1beta1.ListModelsRequ - est.page\_token] to obtain that page. + equest.page_token][google.cloud.automl.v1beta1.ListModelsReque + st.page_token] to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelsResponse) DeleteModelRequest = _reflection.GeneratedProtocolMessageType( "DeleteModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. Attributes: name: - Resource name of the model being deleted. + Required. Resource name of the model being deleted. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteModelRequest) DeployModelRequest = _reflection.GeneratedProtocolMessageType( "DeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. @@ -2533,39 +2538,39 @@ image_classification_model_deployment_metadata: Model deployment metadata specific to Image Classification. name: - Resource name of the model to deploy. + Required. Resource name of the model to deploy. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeployModelRequest) UndeployModelRequest = _reflection.GeneratedProtocolMessageType( "UndeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_UNDEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UNDEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. Attributes: name: - Resource name of the model to undeploy. + Required. Resource name of the model to undeploy. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(UndeployModelRequest) ExportModelRequest = _reflection.GeneratedProtocolMessageType( "ExportModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an error code will be returned. @@ -2578,17 +2583,17 @@ Required. The desired output location and configuration. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelRequest) - ), + }, ) _sym_db.RegisterMessage(ExportModelRequest) ExportEvaluatedExamplesRequest = _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. @@ -2600,74 +2605,75 @@ Required. The desired output location and configuration. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest) - ), + }, ) _sym_db.RegisterMessage(ExportEvaluatedExamplesRequest) GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType( "GetModelEvaluationRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELEVALUATIONREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. Attributes: name: - Resource name for the model evaluation. + Required. Resource name for the model evaluation. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelEvaluationRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelEvaluationRequest) ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. Attributes: parent: - Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations - from across all models of the parent location. + Required. Resource name of the model to list the model + evaluations for. If modelId is set as “-”, this will list + model evaluations from across all models of the parent + location. filter: An expression for filtering the results of the request. - ``annotation_spec_id`` - for =, != or existence. See example below for the last. Some examples of using the filter are: - - ``annotation_spec_id!=4`` --> The model evaluation was done + - ``annotation_spec_id!=4`` –> The model evaluation was done for annotation spec with ID different than 4. - ``NOT - annotation_spec_id:*`` --> The model evaluation was done for + annotation_spec_id:*`` –> The model evaluation was done for aggregate of all annotation specs. page_size: Requested page size. page_token: A token identifying a page of results for the server to return. Typically obtained via [ListModelEvaluationsResponse.n - ext\_page\_token][google.cloud.automl.v1beta1.ListModelEvaluat - ionsResponse.next\_page\_token] of the previous [AutoMl.ListMo - delEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEv - aluations] call. 
+ ext_page_token][google.cloud.automl.v1beta1.ListModelEvaluatio + nsResponse.next_page_token] of the previous [AutoMl.ListModelE + valuations][google.cloud.automl.v1beta1.AutoMl.ListModelEvalua + tions] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsRequest) ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. @@ -2676,29 +2682,57 @@ List of model evaluations in the requested page. next_page_token: A token to retrieve next page of results. Pass to the [ListMod - elEvaluationsRequest.page\_token][google.cloud.automl.v1beta1. - ListModelEvaluationsRequest.page\_token] field of a new [AutoM - l.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.Lis - tModelEvaluations] request to obtain that page. + elEvaluationsRequest.page_token][google.cloud.automl.v1beta1.L + istModelEvaluationsRequest.page_token] field of a new [AutoMl. + ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListM + odelEvaluations] request to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsResponse) DESCRIPTOR._options = None +_CREATEDATASETREQUEST.fields_by_name["parent"]._options = None +_CREATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_GETDATASETREQUEST.fields_by_name["name"]._options = None +_LISTDATASETSREQUEST.fields_by_name["parent"]._options = None +_UPDATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_DELETEDATASETREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["input_config"]._options = None +_EXPORTDATAREQUEST.fields_by_name["name"]._options = None +_EXPORTDATAREQUEST.fields_by_name["output_config"]._options = None +_GETANNOTATIONSPECREQUEST.fields_by_name["name"]._options = None +_GETTABLESPECREQUEST.fields_by_name["name"]._options = None +_LISTTABLESPECSREQUEST.fields_by_name["parent"]._options = None +_UPDATETABLESPECREQUEST.fields_by_name["table_spec"]._options = None +_GETCOLUMNSPECREQUEST.fields_by_name["name"]._options = None +_LISTCOLUMNSPECSREQUEST.fields_by_name["parent"]._options = None +_UPDATECOLUMNSPECREQUEST.fields_by_name["column_spec"]._options = None +_CREATEMODELREQUEST.fields_by_name["parent"]._options = None +_CREATEMODELREQUEST.fields_by_name["model"]._options = None +_GETMODELREQUEST.fields_by_name["name"]._options = None +_LISTMODELSREQUEST.fields_by_name["parent"]._options = None +_DELETEMODELREQUEST.fields_by_name["name"]._options = None +_DEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_UNDEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["output_config"]._options = None +_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name["name"]._options = None +_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name["output_config"]._options = None 
+_GETMODELEVALUATIONREQUEST.fields_by_name["name"]._options = None +_LISTMODELEVALUATIONSREQUEST.fields_by_name["parent"]._options = None _AUTOML = _descriptor.ServiceDescriptor( name="AutoMl", full_name="google.cloud.automl.v1beta1.AutoMl", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), - serialized_start=3671, - serialized_end=8112, + serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + serialized_start=4628, + serialized_end=9729, methods=[ _descriptor.MethodDescriptor( name="CreateDataset", @@ -2707,9 +2741,7 @@ containing_service=None, input_type=_CREATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - '\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset' - ), + serialized_options=b'\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset', ), _descriptor.MethodDescriptor( name="GetDataset", @@ -2718,9 +2750,7 @@ containing_service=None, input_type=_GETDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListDatasets", @@ -2729,9 +2759,7 @@ containing_service=None, input_type=_LISTDATASETSREQUEST, output_type=_LISTDATASETSRESPONSE, - serialized_options=_b( - "\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets" - ), + serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets\332A\006parent", ), _descriptor.MethodDescriptor( name="UpdateDataset", @@ -2740,9 +2768,7 @@ 
containing_service=None, input_type=_UPDATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset" - ), + serialized_options=b"\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\007dataset", ), _descriptor.MethodDescriptor( name="DeleteDataset", @@ -2751,9 +2777,7 @@ containing_service=None, input_type=_DELETEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", ), _descriptor.MethodDescriptor( name="ImportData", @@ -2762,9 +2786,7 @@ containing_service=None, input_type=_IMPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002A"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}" - ), + serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListTableSpecs", @@ -2806,9 +2822,7 @@ containing_service=None, input_type=_LISTTABLESPECSREQUEST, output_type=_LISTTABLESPECSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs" - ), + serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\332A\006parent", ), _descriptor.MethodDescriptor( name="UpdateTableSpec", @@ -2817,9 +2831,7 @@ containing_service=None, input_type=_UPDATETABLESPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC, - 
serialized_options=_b( - "\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec" - ), + serialized_options=b"\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\332A\ntable_spec", ), _descriptor.MethodDescriptor( name="GetColumnSpec", @@ -2828,9 +2840,7 @@ containing_service=None, input_type=_GETCOLUMNSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, - serialized_options=_b( - "\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}" - ), + serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListColumnSpecs", @@ -2839,9 +2849,7 @@ containing_service=None, input_type=_LISTCOLUMNSPECSREQUEST, output_type=_LISTCOLUMNSPECSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs" - ), + serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\332A\006parent", ), _descriptor.MethodDescriptor( name="UpdateColumnSpec", @@ -2850,9 +2858,7 @@ containing_service=None, input_type=_UPDATECOLUMNSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, - serialized_options=_b( - "\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec" - ), + serialized_options=b"\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec\332A\013column_spec", ), _descriptor.MethodDescriptor( name="CreateModel", @@ -2861,9 +2867,7 @@ containing_service=None, input_type=_CREATEMODELREQUEST, 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model' - ), + serialized_options=b'\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="GetModel", @@ -2872,9 +2876,7 @@ containing_service=None, input_type=_GETMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL, - serialized_options=_b( - "\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListModels", @@ -2883,9 +2885,7 @@ containing_service=None, input_type=_LISTMODELSREQUEST, output_type=_LISTMODELSRESPONSE, - serialized_options=_b( - "\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models" - ), + serialized_options=b"\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models\332A\006parent", ), _descriptor.MethodDescriptor( name="DeleteModel", @@ -2894,9 +2894,7 @@ containing_service=None, input_type=_DELETEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", ), _descriptor.MethodDescriptor( name="DeployModel", @@ -2905,9 +2903,7 @@ containing_service=None, input_type=_DEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*' - ), + 
serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="UndeployModel", @@ -2916,9 +2912,7 @@ containing_service=None, input_type=_UNDEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*' - ), + serialized_options=b'\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="ExportModel", @@ -2927,9 +2921,7 @@ containing_service=None, input_type=_EXPORTMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*' - ), + serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="ExportEvaluatedExamples", @@ -2938,9 +2930,7 @@ containing_service=None, input_type=_EXPORTEVALUATEDEXAMPLESREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*' - ), + serialized_options=b'\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', ), _descriptor.MethodDescriptor( name="GetModelEvaluation", @@ -2949,9 +2939,7 @@ containing_service=None, input_type=_GETMODELEVALUATIONREQUEST, 
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, - serialized_options=_b( - "\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" - ), + serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", ), _descriptor.MethodDescriptor( name="ListModelEvaluations", @@ -2960,9 +2948,7 @@ containing_service=None, input_type=_LISTMODELEVALUATIONSREQUEST, output_type=_LISTMODELEVALUATIONSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations" - ), + serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\006parent", ), ], ) diff --git a/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py index eb049c20..efb69009 100644 --- a/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py +++ b/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py @@ -345,7 +345,8 @@ def DeployModel(self, request, context): [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection and Tables; all other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage + deployment automatically. Returns an empty response in the [response][google.longrunning.Operation.response] field when it completes. 
diff --git a/google/cloud/automl_v1beta1/proto/table_spec.proto b/google/cloud/automl_v1beta1/proto/table_spec.proto index 4475617a..bc3fc744 100644 --- a/google/cloud/automl_v1beta1/proto/table_spec.proto +++ b/google/cloud/automl_v1beta1/proto/table_spec.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/io.proto"; import "google/api/annotations.proto"; @@ -36,6 +36,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Used by: // * Tables message TableSpec { + option (google.api.resource) = { + type: "automl.googleapis.com/TableSpec" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}" + }; + // Output only. The resource name of the table spec. // Form: // diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py index 48aa9178..b38c3320 100644 --- a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/table_spec.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, ) @@ -25,13 +23,10 @@ name="google/cloud/automl_v1beta1/proto/table_spec.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/table_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1cgoogle/api/annotations.proto"\xc7\x01\n\tTableSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x13time_column_spec_id\x18\x02 \x01(\t\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x17\n\x0fvalid_row_count\x18\x04 \x01(\x03\x12\x14\n\x0c\x63olumn_count\x18\x07 \x01(\x03\x12?\n\rinput_configs\x18\x05 \x03(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig\x12\x0c\n\x04\x65tag\x18\x06 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/table_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1cgoogle/api/annotations.proto"\xc1\x02\n\tTableSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x13time_column_spec_id\x18\x02 \x01(\t\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x17\n\x0fvalid_row_count\x18\x04 \x01(\x03\x12\x14\n\x0c\x63olumn_count\x18\x07 \x01(\x03\x12?\n\rinput_configs\x18\x05 \x03(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t:x\xea\x41u\n\x1f\x61utoml.googleapis.com/TableSpec\x12Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], @@ -54,7 +49,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -72,7 +67,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -162,7 +157,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -175,13 +170,13 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + serialized_options=b"\352Au\n\037automl.googleapis.com/TableSpec\022Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}", is_extendable=False, 
syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=158, - serialized_end=357, + serialized_start=185, + serialized_end=506, ) _TABLESPEC.fields_by_name[ @@ -193,15 +188,15 @@ TableSpec = _reflection.GeneratedProtocolMessageType( "TableSpec", (_message.Message,), - dict( - DESCRIPTOR=_TABLESPEC, - __module__="google.cloud.automl_v1beta1.proto.table_spec_pb2", - __doc__="""A specification of a relational table. The table's schema + { + "DESCRIPTOR": _TABLESPEC, + "__module__": "google.cloud.automl_v1beta1.proto.table_spec_pb2", + "__doc__": """A specification of a relational table. The table’s schema is represented via its child column specs. It is pre-populated as part of ImportData by schema inference algorithm, the version of which is a required parameter of ImportData InputConfig. Note: While working with a table, at times the schema may be inconsistent with the data in the - table (e.g. string in a FLOAT64 column). The consistency validation is + table (e.g. string in a FLOAT64 column). The consistency validation is done upon creation of a model. Used by: \* Tables @@ -211,19 +206,19 @@ jects/{project_id}/locations/{location_id}/datasets/{dataset_i d}/tableSpecs/{table_spec_id}`` time_column_spec_id: - column\_spec\_id of the time column. Only used if the parent - dataset's ml\_use\_column\_spec\_id is not set. Used to split - rows into TRAIN, VALIDATE and TEST sets such that oldest rows - go to TRAIN set, newest to TEST, and those in between to - VALIDATE. Required type: TIMESTAMP. If both this column and - ml\_use\_column are not set, then ML use of all rows will be + column_spec_id of the time column. Only used if the parent + dataset’s ml_use_column_spec_id is not set. Used to split rows + into TRAIN, VALIDATE and TEST sets such that oldest rows go to + TRAIN set, newest to TEST, and those in between to VALIDATE. + Required type: TIMESTAMP. 
If both this column and + ml_use_column are not set, then ML use of all rows will be assigned by AutoML. NOTE: Updates of this field will instantly affect any other users concurrently working with the dataset. row_count: - Output only. The number of rows (i.e. examples) in the table. + Output only. The number of rows (i.e. examples) in the table. valid_row_count: - Output only. The number of valid rows (i.e. without values - that don't match DataType-s of their columns). + Output only. The number of valid rows (i.e. without values + that don’t match DataType-s of their columns). column_count: Output only. The number of columns of the table. That is, the number of child ColumnSpec-s. @@ -232,13 +227,14 @@ in the table had been imported. etag: Used to perform consistent read-modify-write updates. If not - set, a blind "overwrite" update happens. + set, a blind “overwrite” update happens. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TableSpec) - ), + }, ) _sym_db.RegisterMessage(TableSpec) DESCRIPTOR._options = None +_TABLESPEC._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/tables.proto b/google/cloud/automl_v1beta1/proto/tables.proto index 5b786c9f..5327f5e7 100644 --- a/google/cloud/automl_v1beta1/proto/tables.proto +++ b/google/cloud/automl_v1beta1/proto/tables.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,21 +11,21 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/classification.proto"; import "google/cloud/automl/v1beta1/column_spec.proto"; import "google/cloud/automl/v1beta1/data_items.proto"; import "google/cloud/automl/v1beta1/data_stats.proto"; import "google/cloud/automl/v1beta1/ranges.proto"; +import "google/cloud/automl/v1beta1/regression.proto"; import "google/cloud/automl/v1beta1/temporal.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -249,6 +249,15 @@ message TablesAnnotation { // [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name] // would be populated, then this whole field is not. repeated TablesModelColumnInfo tables_model_column_info = 3; + + // Output only. Stores the prediction score for the baseline example, which + // is defined as the example with all values set to their baseline values. + // This is used as part of the Sampled Shapley explanation of the model's + // prediction. This field is populated only when feature importance is + // requested. For regression models, this holds the baseline prediction for + // the baseline example. For classification models, this holds the baseline + // prediction for the baseline example for the argmax class. + float baseline_score = 5; } // An information specific to given column and Tables Model, in context diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py index 4659aa8d..32fe800b 100644 --- a/google/cloud/automl_v1beta1/proto/tables_pb2.py +++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/tables.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,7 +12,6 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) @@ -31,33 +27,34 @@ from google.cloud.automl_v1beta1.proto import ( ranges_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2, ) +from google.cloud.automl_v1beta1.proto import ( + regression_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2, +) from google.cloud.automl_v1beta1.proto import ( temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, ) from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/tables.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - 
'\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xe5\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 
\x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xfd\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12\x16\n\x0e\x62\x61seline_score\x18\x05 \x01(\x02"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR, 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -78,7 +75,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -109,13 +106,13 @@ extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=806, - serialized_end=916, + serialized_start=858, + serialized_end=968, ) _TABLESDATASETMETADATA = _descriptor.Descriptor( @@ -134,7 +131,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -152,7 +149,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -170,7 +167,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -188,7 +185,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -235,15 +232,15 @@ ), ], extensions=[], - 
nested_types=[_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY], + nested_types=[_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=484, - serialized_end=916, + serialized_start=536, + serialized_end=968, ) @@ -335,7 +332,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -431,10 +428,10 @@ index=0, containing_type=None, fields=[], - ) + ), ], - serialized_start=919, - serialized_end=1453, + serialized_start=971, + serialized_end=1505, ) @@ -517,6 +514,24 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="baseline_score", + full_name="google.cloud.automl.v1beta1.TablesAnnotation.baseline_score", + index=4, + number=5, + type=2, + cpp_type=6, + label=1, + has_default_value=False, + default_value=float(0), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), ], extensions=[], nested_types=[], @@ -526,8 +541,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1456, - serialized_end=1685, + serialized_start=1508, + serialized_end=1761, ) @@ -547,7 +562,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -565,7 +580,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -601,8 +616,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1687, - serialized_end=1793, + serialized_start=1763, + serialized_end=1869, ) 
_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY.fields_by_name[ @@ -672,27 +687,27 @@ TablesDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TablesDatasetMetadata", (_message.Message,), - dict( - TargetColumnCorrelationsEntry=_reflection.GeneratedProtocolMessageType( + { + "TargetColumnCorrelationsEntry": _reflection.GeneratedProtocolMessageType( "TargetColumnCorrelationsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2" + { + "DESCRIPTOR": _TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry) - ), + }, ), - DESCRIPTOR=_TABLESDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""Metadata for a dataset used for AutoML Tables. + "DESCRIPTOR": _TABLESDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """Metadata for a dataset used for AutoML Tables. Attributes: primary_table_spec_id: - Output only. The table\_spec\_id of the primary table of this + Output only. The table_spec_id of the primary table of this dataset. target_column_spec_id: - column\_spec\_id of the primary table's column that should be + column_spec_id of the primary table’s column that should be used as the training & prediction target. This column must be non-nullable and have one of following data types (otherwise model creation will error): - CATEGORY - FLOAT64 If the @@ -701,8 +716,8 @@ instantly affect any other users concurrently working with the dataset. weight_column_spec_id: - column\_spec\_id of the primary table's column that should be - used as the weight column, i.e. the higher the value the more + column_spec_id of the primary table’s column that should be + used as the weight column, i.e. 
the higher the value the more important the row will be during model training. Required type: FLOAT64. Allowed values: 0 to 10000, inclusive on both ends; 0 means the row is ignored for training. If not set all @@ -710,40 +725,40 @@ this field will instantly affect any other users concurrently working with the dataset. ml_use_column_spec_id: - column\_spec\_id of the primary table column which specifies a - possible ML use of the row, i.e. the column will be used to + column_spec_id of the primary table column which specifies a + possible ML use of the row, i.e. the column will be used to split the rows into TRAIN, VALIDATE and TEST sets. Required type: STRING. This column, if set, must either have all of ``TRAIN``, ``VALIDATE``, ``TEST`` among its values, or only have ``TEST``, ``UNASSIGNED`` values. In the latter case the rows with ``UNASSIGNED`` value will be assigned by AutoML. Note that if a given ml use distribution makes it impossible - to create a "good" model, that call will error describing the - issue. If both this column\_spec\_id and primary table's - time\_column\_spec\_id are not set, then all rows are treated - as ``UNASSIGNED``. NOTE: Updates of this field will instantly + to create a “good” model, that call will error describing the + issue. If both this column_spec_id and primary table’s + time_column_spec_id are not set, then all rows are treated as + ``UNASSIGNED``. NOTE: Updates of this field will instantly affect any other users concurrently working with the dataset. target_column_correlations: Output only. Correlations between [TablesDatasetMetadata.targ - et\_column\_spec\_id][google.cloud.automl.v1beta1.TablesDatase - tMetadata.target\_column\_spec\_id], and other columns of the - [TablesDatasetMetadataprimary\_table][google.cloud.automl.v1be - ta1.TablesDatasetMetadata.primary\_table\_spec\_id]. Only set - if the target column is set. Mapping from other column spec id - to its CorrelationStats with the target column. 
This field may - be stale, see the stats\_update\_time field for for the - timestamp at which these stats were last updated. + et_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMe + tadata.target_column_spec_id], and other columns of the [Tabl + esDatasetMetadataprimary_table][google.cloud.automl.v1beta1.Ta + blesDatasetMetadata.primary_table_spec_id]. Only set if the + target column is set. Mapping from other column spec id to its + CorrelationStats with the target column. This field may be + stale, see the stats_update_time field for for the timestamp + at which these stats were last updated. stats_update_time: Output only. The most recent timestamp when - target\_column\_correlations field and all descendant - ColumnSpec.data\_stats and ColumnSpec.top\_correlated\_columns + target_column_correlations field and all descendant + ColumnSpec.data_stats and ColumnSpec.top_correlated_columns fields were last (re-)generated. Any changes that happened to the dataset afterwards are not reflected in these fields values. The regeneration happens in the background on a best effort basis. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TablesDatasetMetadata) _sym_db.RegisterMessage(TablesDatasetMetadata.TargetColumnCorrelationsEntry) @@ -751,10 +766,10 @@ TablesModelMetadata = _reflection.GeneratedProtocolMessageType( "TablesModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TABLESMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""Model metadata specific to AutoML Tables. + { + "DESCRIPTOR": _TABLESMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """Model metadata specific to AutoML Tables. Attributes: @@ -763,74 +778,73 @@ ``MAXIMIZE_PRECISION_AT_RECALL`` and ``MAXIMIZE_RECALL_AT_PRECISION``, otherwise unused. 
optimization_objective_recall_value: - Required when optimization\_objective is - "MAXIMIZE\_PRECISION\_AT\_RECALL". Must be between 0 and 1, + Required when optimization_objective is + “MAXIMIZE_PRECISION_AT_RECALL”. Must be between 0 and 1, inclusive. optimization_objective_precision_value: - Required when optimization\_objective is - "MAXIMIZE\_RECALL\_AT\_PRECISION". Must be between 0 and 1, + Required when optimization_objective is + “MAXIMIZE_RECALL_AT_PRECISION”. Must be between 0 and 1, inclusive. target_column_spec: - Column spec of the dataset's primary table's column the model + Column spec of the dataset’s primary table’s column the model is predicting. Snapshotted when model creation started. Only 3 - fields are used: name - May be set on CreateModel, if it's not + fields are used: name - May be set on CreateModel, if it’s not then the ColumnSpec corresponding to the current - target\_column\_spec\_id of the dataset the model is trained - from is used. If neither is set, CreateModel will error. - display\_name - Output only. data\_type - Output only. + target_column_spec_id of the dataset the model is trained from + is used. If neither is set, CreateModel will error. + display_name - Output only. data_type - Output only. input_feature_column_specs: - Column specs of the dataset's primary table's columns, on + Column specs of the dataset’s primary table’s columns, on which the model is trained and which are used as the input for - predictions. The [target\_column][google.cloud.automl.v1beta1 - .TablesModelMetadata.target\_column\_spec] as well as, - according to dataset's state upon model creation, [weight\_co - lumn][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight - \_column\_spec\_id], and [ml\_use\_column][google.cloud.autom - l.v1beta1.TablesDatasetMetadata.ml\_use\_column\_spec\_id] - must never be included here. 
Only 3 fields are used: - name - - May be set on CreateModel, if set only the columns specified - are used, otherwise all primary table's columns (except the - ones listed above) are used for the training and prediction - input. - display\_name - Output only. - data\_type - - Output only. + predictions. The [target_column][google.cloud.automl.v1beta1. + TablesModelMetadata.target_column_spec] as well as, according + to dataset’s state upon model creation, [weight_column][googl + e.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spe + c_id], and [ml_use_column][google.cloud.automl.v1beta1.Tables + DatasetMetadata.ml_use_column_spec_id] must never be included + here. Only 3 fields are used: - name - May be set on + CreateModel, if set only the columns specified are used, + otherwise all primary table’s columns (except the ones + listed above) are used for the training and prediction input. + - display_name - Output only. - data_type - Output only. optimization_objective: Objective function the model is optimizing towards. The training process creates a model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type. If the field is not set, a default objective function is - used. CLASSIFICATION\_BINARY: "MAXIMIZE\_AU\_ROC" (default) - + used. CLASSIFICATION_BINARY: “MAXIMIZE_AU_ROC” (default) - Maximize the area under the receiver operating characteristic - (ROC) curve. "MINIMIZE\_LOG\_LOSS" - Minimize log loss. - "MAXIMIZE\_AU\_PRC" - Maximize the area under the precision- - recall curve. "MAXIMIZE\_PRECISION\_AT\_RECALL" - Maximize + (ROC) curve. “MINIMIZE_LOG_LOSS” - Minimize log loss. + “MAXIMIZE_AU_PRC” - Maximize the area under the precision- + recall curve. “MAXIMIZE_PRECISION_AT_RECALL” - Maximize precision for a specified recall value. - "MAXIMIZE\_RECALL\_AT\_PRECISION" - Maximize recall for a - specified precision value. 
CLASSIFICATION\_MULTI\_CLASS : - "MINIMIZE\_LOG\_LOSS" (default) - Minimize log loss. - REGRESSION: "MINIMIZE\_RMSE" (default) - Minimize root-mean- - squared error (RMSE). "MINIMIZE\_MAE" - Minimize mean-absolute - error (MAE). "MINIMIZE\_RMSLE" - Minimize root-mean-squared - log error (RMSLE). + “MAXIMIZE_RECALL_AT_PRECISION” - Maximize recall for a + specified precision value. CLASSIFICATION_MULTI_CLASS : + “MINIMIZE_LOG_LOSS” (default) - Minimize log loss. + REGRESSION: “MINIMIZE_RMSE” (default) - Minimize root-mean- + squared error (RMSE). “MINIMIZE_MAE” - Minimize mean-absolute + error (MAE). “MINIMIZE_RMSLE” - Minimize root-mean-squared log + error (RMSLE). tables_model_column_info: Output only. Auxiliary information for each of the - input\_feature\_column\_specs with respect to this particular + input_feature_column_specs with respect to this particular model. train_budget_milli_node_hours: Required. The train budget of creating this model, expressed - in milli node hours i.e. 1,000 value in this field means 1 + in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - - at the backend's discretion. This especially may happen when + - at the backend’s discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train - a model for the given dataset, the training won't be attempted + a model for the given dataset, the training won’t be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. train_cost_milli_node_hours: Output only. The actual training cost of the model, expressed - in milli node hours, i.e. 1,000 value in this field means 1 + in milli node hours, i.e. 1,000 value in this field means 1 node hour. 
Guaranteed to not exceed the train budget. disable_early_stopping: Use the entire training budget. This disables the early @@ -839,60 +853,70 @@ before the entire training budget has been used. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TablesModelMetadata) TablesAnnotation = _reflection.GeneratedProtocolMessageType( "TablesAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TABLESANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""Contains annotation details specific to Tables. + { + "DESCRIPTOR": _TABLESANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """Contains annotation details specific to Tables. Attributes: score: Output only. A confidence estimate between 0.0 and 1.0, inclusive. A higher value means greater confidence in the - returned value. For [target\_column\_spec][google.cloud.autom - l.v1beta1.TablesModelMetadata.target\_column\_spec] of FLOAT64 + returned value. For [target_column_spec][google.cloud.automl. + v1beta1.TablesModelMetadata.target_column_spec] of FLOAT64 data type the score is not populated. prediction_interval: - Output only. Only populated when [target\_column\_spec][googl - e.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spe - c] has FLOAT64 data type. An interval in which the exactly + Output only. Only populated when [target_column_spec][google. + cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + has FLOAT64 data type. An interval in which the exactly correct target value has 95% chance to be in. value: - The predicted value of the row's [target\_column][google.clou - d.automl.v1beta1.TablesModelMetadata.target\_column\_spec]. - The value depends on the column's DataType: - CATEGORY - the + The predicted value of the row’s [target_column][google.cloud + .automl.v1beta1.TablesModelMetadata.target_column_spec]. 
The + value depends on the column’s DataType: - CATEGORY - the predicted (with the above confidence ``score``) CATEGORY value. - FLOAT64 - the predicted (with above ``prediction_interval``) FLOAT64 value. tables_model_column_info: - Output only. Auxiliary information for each of the model's [i - nput\_feature\_column\_specs][google.cloud.automl.v1beta1.Tabl - esModelMetadata.input\_feature\_column\_specs] with respect to - this particular prediction. If no other fields than [column\_ - spec\_name][google.cloud.automl.v1beta1.TablesModelColumnInfo. - column\_spec\_name] and [column\_display\_name][google.cloud. - automl.v1beta1.TablesModelColumnInfo.column\_display\_name] - would be populated, then this whole field is not. + Output only. Auxiliary information for each of the model’s [i + nput_feature_column_specs][google.cloud.automl.v1beta1.TablesM + odelMetadata.input_feature_column_specs] with respect to this + particular prediction. If no other fields than [column_spec_n + ame][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_ + spec_name] and [column_display_name][google.cloud.automl.v1be + ta1.TablesModelColumnInfo.column_display_name] would be + populated, then this whole field is not. + baseline_score: + Output only. Stores the prediction score for the baseline + example, which is defined as the example with all values set + to their baseline values. This is used as part of the Sampled + Shapley explanation of the model’s prediction. This field is + populated only when feature importance is requested. For + regression models, this holds the baseline prediction for the + baseline example. For classification models, this holds the + baseline prediction for the baseline example for the argmax + class. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesAnnotation) - ), + }, ) _sym_db.RegisterMessage(TablesAnnotation) TablesModelColumnInfo = _reflection.GeneratedProtocolMessageType( "TablesModelColumnInfo", (_message.Message,), - dict( - DESCRIPTOR=_TABLESMODELCOLUMNINFO, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""An information specific to given column and Tables Model, + { + "DESCRIPTOR": _TABLESMODELCOLUMNINFO, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """An information specific to given column and Tables Model, in context of the Model and the predictions created by it. @@ -902,7 +926,7 @@ Not populated when this proto is outputted to BigQuery. column_display_name: Output only. The display name of the column (same as the - display\_name of its ColumnSpec). + display_name of its ColumnSpec). feature_importance: Output only. When given as part of a Model (always populated): Measurement of how much model predictions correctness on the @@ -910,10 +934,10 @@ and 1, higher means higher influence. These values are normalized - for all input feature columns of a given model they add to 1. When given back by Predict (populated iff - [feature\_importance + [feature_importance param][google.cloud.automl.v1beta1.PredictRequest.params] is - set) or Batch Predict (populated iff [feature\_importance][goo - gle.cloud.automl.v1beta1.PredictRequest.params] param is set): + set) or Batch Predict (populated iff [feature_importance][goog + le.cloud.automl.v1beta1.PredictRequest.params] param is set): Measurement of how impactful for the prediction returned for the given row the value in this column was. Specifically, the feature importance specifies the marginal contribution that @@ -922,7 +946,7 @@ Shapley method. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesModelColumnInfo) - ), + }, ) _sym_db.RegisterMessage(TablesModelColumnInfo) diff --git a/google/cloud/automl_v1beta1/proto/temporal.proto b/google/cloud/automl_v1beta1/proto/temporal.proto index 84874d99..76db8887 100644 --- a/google/cloud/automl_v1beta1/proto/temporal.proto +++ b/google/cloud/automl_v1beta1/proto/temporal.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2.py b/google/cloud/automl_v1beta1/proto/temporal_pb2.py index a8e53db8..668bc578 100644 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2.py +++ b/google/cloud/automl_v1beta1/proto/temporal_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/temporal.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -23,12 +20,8 @@ name="google/cloud/automl_v1beta1/proto/temporal.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n0google/cloud/automl_v1beta1/proto/temporal.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"w\n\x0bTimeSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/temporal.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"w\n\x0bTimeSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.DurationB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -104,11 +97,11 @@ TimeSegment = _reflection.GeneratedProtocolMessageType( "TimeSegment", (_message.Message,), - dict( - DESCRIPTOR=_TIMESEGMENT, - __module__="google.cloud.automl_v1beta1.proto.temporal_pb2", - __doc__="""A time period inside of an example that has a time - dimension (e.g. video). + { + "DESCRIPTOR": _TIMESEGMENT, + "__module__": "google.cloud.automl_v1beta1.proto.temporal_pb2", + "__doc__": """A time period inside of an example that has a time dimension + (e.g. video). Attributes: @@ -120,7 +113,7 @@ duration since the example start. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimeSegment) - ), + }, ) _sym_db.RegisterMessage(TimeSegment) diff --git a/google/cloud/automl_v1beta1/proto/text.proto b/google/cloud/automl_v1beta1/proto/text.proto index ca722e07..f6f33185 100644 --- a/google/cloud/automl_v1beta1/proto/text.proto +++ b/google/cloud/automl_v1beta1/proto/text.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/classification.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -40,20 +39,27 @@ message TextClassificationModelMetadata { } // Dataset metadata that is specific to text extraction -message TextExtractionDatasetMetadata {} +message TextExtractionDatasetMetadata { + +} // Model metadata that is specific to text extraction. -message TextExtractionModelMetadata {} +message TextExtractionModelMetadata { + +} // Dataset metadata for text sentiment. message TextSentimentDatasetMetadata { - // Required. A sentiment is expressed as an integer ordinal, where higher - // value means a more positive sentiment. The range of sentiments that will be - // used is between 0 and sentiment_max (inclusive on both ends), and all the - // values in the range must be represented in the dataset before a model can - // be created. sentiment_max value must be between 1 and 10 (inclusive). + // Required. A sentiment is expressed as an integer ordinal, where higher value + // means a more positive sentiment. The range of sentiments that will be used + // is between 0 and sentiment_max (inclusive on both ends), and all the values + // in the range must be represented in the dataset before a model can be + // created. + // sentiment_max value must be between 1 and 10 (inclusive). int32 sentiment_max = 1; } // Model metadata that is specific to text sentiment. 
-message TextSentimentModelMetadata {} +message TextSentimentModelMetadata { + +} diff --git a/google/cloud/automl_v1beta1/proto/text_extraction.proto b/google/cloud/automl_v1beta1/proto/text_extraction.proto index 07f0dda8..cfb0e0b3 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction.proto +++ b/google/cloud/automl_v1beta1/proto/text_extraction.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py index 04dc759c..bdf49bf5 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/text_extraction.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,8 @@ name="google/cloud/automl_v1beta1/proto/text_extraction.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 \x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 \x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -96,7 +89,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=172, serialized_end=293, @@ -240,7 +233,7 @@ ), ], extensions=[], - nested_types=[_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY], + nested_types=[_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -277,10 +270,10 @@ TextExtractionAnnotation = _reflection.GeneratedProtocolMessageType( "TextExtractionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2", - __doc__="""Annotation for identifying spans of text. 
+ { + "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", + "__doc__": """Annotation for identifying spans of text. Attributes: @@ -296,21 +289,21 @@ annotation. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextExtractionAnnotation) TextExtractionEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "TextExtractionEvaluationMetrics", (_message.Message,), - dict( - ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType( + { + "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( "ConfidenceMetricsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2", - __doc__="""Metrics for a single confidence threshold. + { + "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", + "__doc__": """Metrics for a single confidence threshold. Attributes: @@ -326,11 +319,11 @@ Output only. The harmonic mean of recall and precision. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry) - ), + }, ), - DESCRIPTOR=_TEXTEXTRACTIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2", - __doc__="""Model evaluation metrics for text extraction problems. + "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", + "__doc__": """Model evaluation metrics for text extraction problems. Attributes: @@ -341,7 +334,7 @@ Precision-recall curve can be derived from it. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(TextExtractionEvaluationMetrics) _sym_db.RegisterMessage(TextExtractionEvaluationMetrics.ConfidenceMetricsEntry) diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py index ea8fef3f..2418465c 100644 --- a/google/cloud/automl_v1beta1/proto/text_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/text.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,25 +12,21 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/text.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 
\x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -62,7 +55,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -101,7 +94,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -180,7 +173,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -247,10 +240,10 @@ TextClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TextClassificationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTCLASSIFICATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Dataset metadata for classification. + { + "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Dataset metadata for classification. Attributes: @@ -258,17 +251,17 @@ Required. Type of the classification problem. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TextClassificationDatasetMetadata) TextClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( "TextClassificationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTCLASSIFICATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Model metadata that is specific to text classification. + { + "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Model metadata that is specific to text classification. Attributes: @@ -277,73 +270,72 @@ this model. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TextClassificationModelMetadata) TextExtractionDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TextExtractionDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Dataset metadata that is specific to text extraction + { + "DESCRIPTOR": _TEXTEXTRACTIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Dataset metadata that is specific to text extraction """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TextExtractionDatasetMetadata) TextExtractionModelMetadata = _reflection.GeneratedProtocolMessageType( "TextExtractionModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Model metadata that is specific to text extraction. + { + "DESCRIPTOR": _TEXTEXTRACTIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Model metadata that is specific to text extraction. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TextExtractionModelMetadata) TextSentimentDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TextSentimentDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Dataset metadata for text sentiment. + { + "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Dataset metadata for text sentiment. Attributes: sentiment_max: Required. 
A sentiment is expressed as an integer ordinal, where higher value means a more positive sentiment. The range - of sentiments that will be used is between 0 and - sentiment\_max (inclusive on both ends), and all the values in - the range must be represented in the dataset before a model - can be created. sentiment\_max value must be between 1 and 10 - (inclusive). + of sentiments that will be used is between 0 and sentiment_max + (inclusive on both ends), and all the values in the range must + be represented in the dataset before a model can be created. + sentiment_max value must be between 1 and 10 (inclusive). """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TextSentimentDatasetMetadata) TextSentimentModelMetadata = _reflection.GeneratedProtocolMessageType( "TextSentimentModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Model metadata that is specific to text sentiment. + { + "DESCRIPTOR": _TEXTSENTIMENTMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Model metadata that is specific to text sentiment. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TextSentimentModelMetadata) diff --git a/google/cloud/automl_v1beta1/proto/text_segment.proto b/google/cloud/automl_v1beta1/proto/text_segment.proto index 41b8be1c..94b17d93 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment.proto +++ b/google/cloud/automl_v1beta1/proto/text_segment.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py index e896211e..5822253c 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/text_segment.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,9 @@ name="google/cloud/automl_v1beta1/proto/text_segment.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\020TextSegmentProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n4google/cloud/automl_v1beta1/proto/text_segment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"H\n\x0bTextSegment\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x12\n\nend_offset\x18\x02 \x01(\x03\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TextSegmentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + 
serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TextSegmentProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n4google/cloud/automl_v1beta1/proto/text_segment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"H\n\x0bTextSegment\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x12\n\nend_offset\x18\x02 \x01(\x03\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TextSegmentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -48,7 +41,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -112,10 +105,10 @@ TextSegment = _reflection.GeneratedProtocolMessageType( "TextSegment", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSEGMENT, - __module__="google.cloud.automl_v1beta1.proto.text_segment_pb2", - __doc__="""A contiguous part of a text (string), assuming it has an + { + "DESCRIPTOR": _TEXTSEGMENT, + "__module__": "google.cloud.automl_v1beta1.proto.text_segment_pb2", + "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding. @@ -129,11 +122,11 @@ end_offset: Required. Zero-based character index of the first character past the end of the text segment (counting character from the - beginning of the text). The character at the end\_offset is - NOT included in the text segment. + beginning of the text). The character at the end_offset is NOT + included in the text segment. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSegment) - ), + }, ) _sym_db.RegisterMessage(TextSegment) diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment.proto b/google/cloud/automl_v1beta1/proto/text_sentiment.proto index 978acb0f..5444c52b 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment.proto +++ b/google/cloud/automl_v1beta1/proto/text_sentiment.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py index c55c9979..d4c0c728 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/text_sentiment.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,8 @@ name="google/cloud/automl_v1beta1/proto/text_sentiment.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\022TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n6google/cloud/automl_v1beta1/proto/text_sentiment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto",\n\x17TextSentimentAnnotation\x12\x11\n\tsentiment\x18\x01 \x01(\x05"\xc5\x02\n\x1eTextSentimentEvaluationMetrics\x12\x11\n\tprecision\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x03 \x01(\x02\x12\x1b\n\x13mean_absolute_error\x18\x04 \x01(\x02\x12\x1a\n\x12mean_squared_error\x18\x05 \x01(\x02\x12\x14\n\x0clinear_kappa\x18\x06 \x01(\x02\x12\x17\n\x0fquadratic_kappa\x18\x07 \x01(\x02\x12\x66\n\x10\x63onfusion_matrix\x18\x08 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1e\n\x12\x61nnotation_spec_id\x18\t \x03(\tB\x02\x18\x01\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x12TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + 
serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\022TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/text_sentiment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto",\n\x17TextSentimentAnnotation\x12\x11\n\tsentiment\x18\x01 \x01(\x05"\xc5\x02\n\x1eTextSentimentEvaluationMetrics\x12\x11\n\tprecision\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x03 \x01(\x02\x12\x1b\n\x13mean_absolute_error\x18\x04 \x01(\x02\x12\x1a\n\x12mean_squared_error\x18\x05 \x01(\x02\x12\x14\n\x0clinear_kappa\x18\x06 \x01(\x02\x12\x17\n\x0fquadratic_kappa\x18\x07 \x01(\x02\x12\x66\n\x10\x63onfusion_matrix\x18\x08 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1e\n\x12\x61nnotation_spec_id\x18\t \x03(\tB\x02\x18\x01\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x12TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -62,7 +55,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -243,7 +236,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\030\001"), + serialized_options=b"\030\001", file=DESCRIPTOR, ), ], @@ -273,10 +266,10 @@ TextSentimentAnnotation = _reflection.GeneratedProtocolMessageType( "TextSentimentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTANNOTATION, - 
__module__="google.cloud.automl_v1beta1.proto.text_sentiment_pb2", - __doc__="""Contains annotation details specific to text sentiment. + { + "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", + "__doc__": """Contains annotation details specific to text sentiment. Attributes: @@ -286,29 +279,29 @@ ata] when populating the dataset from which the model used for the prediction had been trained. The sentiment values are between 0 and - Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive), with higher value meaning more positive - sentiment. They are completely relative, i.e. 0 means least - positive sentiment and sentiment\_max means the most positive - from the sentiments present in the train data. Therefore e.g. - if train data had only negative sentiment, then - sentiment\_max, would be still negative (although least - negative). The sentiment shouldn't be confused with "score" or - "magnitude" from the previous Natural Language Sentiment + sentiment. They are completely relative, i.e. 0 means least + positive sentiment and sentiment_max means the most positive + from the sentiments present in the train data. Therefore + e.g. if train data had only negative sentiment, then + sentiment_max, would be still negative (although least + negative). The sentiment shouldn’t be confused with “score” or + “magnitude” from the previous Natural Language Sentiment Analysis API. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextSentimentAnnotation) TextSentimentEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "TextSentimentEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.text_sentiment_pb2", - __doc__="""Model evaluation metrics for text sentiment problems. 
+ { + "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", + "__doc__": """Model evaluation metrics for text sentiment problems. Attributes: @@ -343,7 +336,7 @@ Deprecated . """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(TextSentimentEvaluationMetrics) diff --git a/google/cloud/automl_v1beta1/proto/translation.proto b/google/cloud/automl_v1beta1/proto/translation.proto index ed02b2af..8585bd41 100644 --- a/google/cloud/automl_v1beta1/proto/translation.proto +++ b/google/cloud/automl_v1beta1/proto/translation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/field_behavior.proto"; import "google/cloud/automl/v1beta1/data_items.proto"; import "google/api/annotations.proto"; @@ -30,10 +30,10 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Dataset metadata that is specific to translation. message TranslationDatasetMetadata { // Required. The BCP-47 language code of the source language. - string source_language_code = 1; + string source_language_code = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The BCP-47 language code of the target language. - string target_language_code = 2; + string target_language_code = 2 [(google.api.field_behavior) = REQUIRED]; } // Evaluation metrics for the dataset. 
diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2.py b/google/cloud/automl_v1beta1/proto/translation_pb2.py index 15a08176..539d700c 100644 --- a/google/cloud/automl_v1beta1/proto/translation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/translation_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/translation.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.cloud.automl_v1beta1.proto import ( data_items_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2, ) @@ -25,13 +23,10 @@ name="google/cloud/automl_v1beta1/proto/translation.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\020TranslationProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n3google/cloud/automl_v1beta1/proto/translation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x1cgoogle/api/annotations.proto"X\n\x1aTranslationDatasetMetadata\x12\x1c\n\x14source_language_code\x18\x01 \x01(\t\x12\x1c\n\x14target_language_code\x18\x02 \x01(\t"K\n\x1cTranslationEvaluationMetrics\x12\x12\n\nbleu_score\x18\x01 \x01(\x01\x12\x17\n\x0f\x62\x61se_bleu_score\x18\x02 \x01(\x01"j\n\x18TranslationModelMetadata\x12\x12\n\nbase_model\x18\x01 \x01(\t\x12\x1c\n\x14source_language_code\x18\x02 \x01(\t\x12\x1c\n\x14target_language_code\x18\x03 
\x01(\t"]\n\x15TranslationAnnotation\x12\x44\n\x12translated_content\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetB\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TranslationProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TranslationProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/translation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x1cgoogle/api/annotations.proto"b\n\x1aTranslationDatasetMetadata\x12!\n\x14source_language_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12!\n\x14target_language_code\x18\x02 \x01(\tB\x03\xe0\x41\x02"K\n\x1cTranslationEvaluationMetrics\x12\x12\n\nbleu_score\x18\x01 \x01(\x01\x12\x17\n\x0f\x62\x61se_bleu_score\x18\x02 \x01(\x01"j\n\x18TranslationModelMetadata\x12\x12\n\nbase_model\x18\x01 \x01(\t\x12\x1c\n\x14source_language_code\x18\x02 \x01(\t\x12\x1c\n\x14target_language_code\x18\x03 \x01(\t"]\n\x15TranslationAnnotation\x12\x44\n\x12translated_content\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetB\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TranslationProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], @@ -54,13 +49,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + 
default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -72,13 +67,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, ), ], @@ -90,8 +85,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=166, - serialized_end=254, + serialized_start=199, + serialized_end=297, ) @@ -147,8 +142,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=256, - serialized_end=331, + serialized_start=299, + serialized_end=374, ) @@ -168,7 +163,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -186,7 +181,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -204,7 +199,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -222,8 +217,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=333, - serialized_end=439, + serialized_start=376, + serialized_end=482, ) @@ -251,7 +246,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -261,8 +256,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=441, - serialized_end=534, + serialized_start=484, + serialized_end=577, ) _TRANSLATIONANNOTATION.fields_by_name[ @@ -283,10 
+278,10 @@ TranslationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TranslationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Dataset metadata that is specific to translation. + { + "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Dataset metadata that is specific to translation. Attributes: @@ -296,17 +291,17 @@ Required. The BCP-47 language code of the target language. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TranslationDatasetMetadata) TranslationEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "TranslationEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Evaluation metrics for the dataset. + { + "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Evaluation metrics for the dataset. Attributes: @@ -316,17 +311,17 @@ Output only. BLEU score for base model. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(TranslationEvaluationMetrics) TranslationModelMetadata = _reflection.GeneratedProtocolMessageType( "TranslationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Model metadata that is specific to translation. + { + "DESCRIPTOR": _TRANSLATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Model metadata that is specific to translation. Attributes: @@ -343,17 +338,17 @@ that is used for training. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TranslationModelMetadata) TranslationAnnotation = _reflection.GeneratedProtocolMessageType( "TranslationAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Annotation details specific to translation. + { + "DESCRIPTOR": _TRANSLATIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Annotation details specific to translation. Attributes: @@ -361,10 +356,12 @@ Output only . The translated content. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationAnnotation) - ), + }, ) _sym_db.RegisterMessage(TranslationAnnotation) DESCRIPTOR._options = None +_TRANSLATIONDATASETMETADATA.fields_by_name["source_language_code"]._options = None +_TRANSLATIONDATASETMETADATA.fields_by_name["target_language_code"]._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/video.proto b/google/cloud/automl_v1beta1/proto/video.proto index b7c7325b..268ae2a8 100644 --- a/google/cloud/automl_v1beta1/proto/video.proto +++ b/google/cloud/automl_v1beta1/proto/video.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/video_pb2.py b/google/cloud/automl_v1beta1/proto/video_pb2.py index 1481681a..da658ee0 100644 --- a/google/cloud/automl_v1beta1/proto/video_pb2.py +++ b/google/cloud/automl_v1beta1/proto/video_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/video.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,8 @@ name="google/cloud/automl_v1beta1/proto/video.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + 
serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -134,57 +127,57 @@ VideoClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "VideoClassificationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCLASSIFICATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Dataset metadata specific to video classification. All + { + "DESCRIPTOR": _VIDEOCLASSIFICATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Dataset metadata specific to video classification. All Video Classification datasets are treated as multi label. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoClassificationDatasetMetadata) VideoObjectTrackingDatasetMetadata = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Dataset metadata specific to video object tracking. 
+ { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Dataset metadata specific to video object tracking. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingDatasetMetadata) VideoClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( "VideoClassificationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCLASSIFICATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Model metadata specific to video classification. + { + "DESCRIPTOR": _VIDEOCLASSIFICATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Model metadata specific to video classification. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoClassificationModelMetadata) VideoObjectTrackingModelMetadata = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Model metadata specific to video object tracking. + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Model metadata specific to video object tracking. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingModelMetadata) diff --git a/noxfile.py b/noxfile.py index 8e90abd8..4e919f74 100644 --- a/noxfile.py +++ b/noxfile.py @@ -23,14 +23,15 @@ import nox -BLACK_VERSION = "black==19.3b0" +BLACK_VERSION = "black==19.10b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -if os.path.exists("samples"): - BLACK_PATHS.append("samples") +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. @@ -38,7 +39,9 @@ def lint(session): serious code quality issues. """ session.install("flake8", BLACK_VERSION) - session.run("black", "--check", *BLACK_PATHS) + session.run( + "black", "--check", *BLACK_PATHS, + ) session.run("flake8", "google", "tests") @@ -53,10 +56,12 @@ def blacken(session): check the state of the `gcp_ubuntu_config` we use for that Kokoro run. 
""" session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) + session.run( + "black", *BLACK_PATHS, + ) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install("docutils", "pygments") @@ -72,6 +77,7 @@ def default(session): session.run( "py.test", "--quiet", + "--cov=google.cloud.automl", "--cov=google.cloud", "--cov=tests.unit", "--cov-append", @@ -83,13 +89,13 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") @@ -109,7 +115,10 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install("mock", "pytest", "google-cloud-testutils") + session.install( + "mock", "pytest", "google-cloud-testutils", + ) + session.install("-e", "test_utils") session.install("-e", ".[pandas,storage]") # Run py.test against the system tests. @@ -119,7 +128,7 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def cover(session): """Run the final coverage report. 
@@ -132,12 +141,12 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docs(session): """Build the docs for this library.""" session.install("-e", ".[pandas,storage]") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh new file mode 100755 index 00000000..ff599eb2 --- /dev/null +++ b/scripts/decrypt-secrets.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ROOT=$( dirname "$DIR" ) + +# Work from the project root. +cd $ROOT + +# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. 
+PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" + +gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + > testing/test-env.sh +gcloud secrets versions access latest \ + --secret="python-docs-samples-service-account" \ + > testing/service-account.json +gcloud secrets versions access latest \ + --secret="python-docs-samples-client-secrets" \ + > testing/client-secrets.json \ No newline at end of file diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py new file mode 100644 index 00000000..d309d6e9 --- /dev/null +++ b/scripts/readme-gen/readme_gen.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Generates READMEs using configuration defined in yaml.""" + +import argparse +import io +import os +import subprocess + +import jinja2 +import yaml + + +jinja_env = jinja2.Environment( + trim_blocks=True, + loader=jinja2.FileSystemLoader( + os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')))) + +README_TMPL = jinja_env.get_template('README.tmpl.rst') + + +def get_help(file): + return subprocess.check_output(['python', file, '--help']).decode() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('source') + parser.add_argument('--destination', default='README.rst') + + args = parser.parse_args() + + source = os.path.abspath(args.source) + root = os.path.dirname(source) + destination = os.path.join(root, args.destination) + + jinja_env.globals['get_help'] = get_help + + with io.open(source, 'r') as f: + config = yaml.load(f) + + # This allows get_help to execute in the right directory. + os.chdir(root) + + output = README_TMPL.render(config) + + with io.open(destination, 'w') as f: + f.write(output) + + +if __name__ == '__main__': + main() diff --git a/scripts/readme-gen/templates/README.tmpl.rst b/scripts/readme-gen/templates/README.tmpl.rst new file mode 100644 index 00000000..4fd23976 --- /dev/null +++ b/scripts/readme-gen/templates/README.tmpl.rst @@ -0,0 +1,87 @@ +{# The following line is a lie. BUT! Once jinja2 is done with it, it will + become truth! #} +.. This file is automatically generated. Do not edit this file directly. + +{{product.name}} Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/README.rst + + +This directory contains samples for {{product.name}}. {{product.description}} + +{{description}} + +.. 
_{{product.name}}: {{product.url}} + +{% if required_api_url %} +To run the sample, you need to enable the API at: {{required_api_url}} +{% endif %} + +{% if required_role %} +To run the sample, you need to have `{{required_role}}` role. +{% endif %} + +{{other_required_steps}} + +{% if setup %} +Setup +------------------------------------------------------------------------------- + +{% for section in setup %} + +{% include section + '.tmpl.rst' %} + +{% endfor %} +{% endif %} + +{% if samples %} +Samples +------------------------------------------------------------------------------- + +{% for sample in samples %} +{{sample.name}} ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +{% if not sample.hide_cloudshell_button %} +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/{{sample.file}},{{folder}}/README.rst +{% endif %} + + +{{sample.description}} + +To run this sample: + +.. code-block:: bash + + $ python {{sample.file}} +{% if sample.show_help %} + + {{get_help(sample.file)|indent}} +{% endif %} + + +{% endfor %} +{% endif %} + +{% if cloud_client_library %} + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + +{% endif %} + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/scripts/readme-gen/templates/auth.tmpl.rst b/scripts/readme-gen/templates/auth.tmpl.rst new file mode 100644 index 00000000..1446b94a --- /dev/null +++ b/scripts/readme-gen/templates/auth.tmpl.rst @@ -0,0 +1,9 @@ +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started diff --git a/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/scripts/readme-gen/templates/auth_api_key.tmpl.rst new file mode 100644 index 00000000..11957ce2 --- /dev/null +++ b/scripts/readme-gen/templates/auth_api_key.tmpl.rst @@ -0,0 +1,14 @@ +Authentication +++++++++++++++ + +Authentication for this service is done via an `API Key`_. To obtain an API +Key: + +1. Open the `Cloud Platform Console`_ +2. Make sure that billing is enabled for your project. +3. From the **Credentials** page, create a new **API Key** or use an existing + one for your project. + +.. _API Key: + https://developers.google.com/api-client-library/python/guide/aaa_apikeys +.. _Cloud Console: https://console.cloud.google.com/project?_ diff --git a/scripts/readme-gen/templates/install_deps.tmpl.rst b/scripts/readme-gen/templates/install_deps.tmpl.rst new file mode 100644 index 00000000..a0406dba --- /dev/null +++ b/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -0,0 +1,29 @@ +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. 
You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ diff --git a/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/scripts/readme-gen/templates/install_portaudio.tmpl.rst new file mode 100644 index 00000000..5ea33d18 --- /dev/null +++ b/scripts/readme-gen/templates/install_portaudio.tmpl.rst @@ -0,0 +1,35 @@ +Install PortAudio ++++++++++++++++++ + +Install `PortAudio`_. This is required by the `PyAudio`_ library to stream +audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the +platform. + +* For Mac OS X, you can use `Homebrew`_:: + + brew install portaudio + + **Note**: if you encounter an error when running `pip install` that indicates + it can't find `portaudio.h`, try running `pip install` with the following + flags:: + + pip install --global-option='build_ext' \ + --global-option='-I/usr/local/include' \ + --global-option='-L/usr/local/lib' \ + pyaudio + +* For Debian / Ubuntu Linux:: + + apt-get install portaudio19-dev python-all-dev + +* Windows may work without having to install PortAudio explicitly (it will get + installed with PyAudio). + +For more details, see the `PyAudio installation`_ page. + + +.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/ +.. _PortAudio: http://www.portaudio.com/ +.. _PyAudio installation: + https://people.csail.mit.edu/hubert/pyaudio/#downloads +.. 
_Homebrew: http://brew.sh diff --git a/setup.cfg b/setup.cfg index 3bd55550..c3a2b39f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [bdist_wheel] universal = 1 diff --git a/synth.metadata b/synth.metadata index 79b7aa93..1e9b3275 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,26 +1,25 @@ { - "updateTime": "2020-01-31T18:17:08.114692Z", "sources": [ { - "generator": { - "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" + "git": { + "name": ".", + "remote": "https://github.com/googleapis/python-automl.git", + "sha": "d6f05a13483cdeccc2532669686d8f6472867bd7" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "2717b8a1c762b26911b45ecc2e4ee01d98401b28", - "internalRef": "292555664" + "sha": "e0f9d9e1f9de890db765be46f45ca8490723e3eb", + "internalRef": "309824146" } }, { - "template": { - "name": "python_split_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "cf2eff09d0f5319a4dc5cdce2b6356d85af4a798" } } ], @@ -31,8 +30,7 @@ "apiName": "automl", "apiVersion": "v1beta1", "language": "python", - "generator": "gapic", - "config": 
"google/cloud/automl/artman_automl_v1beta1.yaml" + "generator": "bazel" } }, { @@ -41,8 +39,7 @@ "apiName": "automl", "apiVersion": "v1", "language": "python", - "generator": "gapic", - "config": "google/cloud/automl/artman_automl_v1.yaml" + "generator": "bazel" } } ] diff --git a/testing/.gitignore b/testing/.gitignore new file mode 100644 index 00000000..b05fbd63 --- /dev/null +++ b/testing/.gitignore @@ -0,0 +1,3 @@ +test-env.sh +service-account.json +client-secrets.json \ No newline at end of file diff --git a/tests/unit/gapic/v1/test_auto_ml_client_v1.py b/tests/unit/gapic/v1/test_auto_ml_client_v1.py index 7a4558d2..0e83fb9a 100644 --- a/tests/unit/gapic/v1/test_auto_ml_client_v1.py +++ b/tests/unit/gapic/v1/test_auto_ml_client_v1.py @@ -70,23 +70,12 @@ class CustomException(Exception): class TestAutoMlClient(object): - def test_create_dataset(self): + def test_delete_dataset(self): # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) operation = operations_pb2.Operation( - name="operations/test_create_dataset", done=True + name="operations/test_delete_dataset", done=True ) operation.response.Pack(expected_response) @@ -98,25 +87,72 @@ def test_create_dataset(self): client = automl_v1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - response = client.create_dataset(parent, dataset) + response = client.delete_dataset(name) result = response.result() assert expected_response == result assert 
len(channel.requests) == 1 - expected_request = service_pb2.CreateDatasetRequest( - parent=parent, dataset=dataset + expected_request = service_pb2.DeleteDatasetRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_dataset_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_delete_dataset_exception", done=True ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + + response = client.delete_dataset(name) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_model(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_delete_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.delete_model(name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.DeleteModelRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_dataset_exception(self): + def test_delete_model_exception(self): # Setup Response error = status_pb2.Status() 
operation = operations_pb2.Operation( - name="operations/test_create_dataset_exception", done=True + name="operations/test_delete_model_exception", done=True ) operation.error.CopyFrom(error) @@ -128,14 +164,13 @@ def test_create_dataset_exception(self): client = automl_v1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - response = client.create_dataset(parent, dataset) + response = client.delete_model(name) exception = response.exception() assert exception.errors[0] == error - def test_update_dataset(self): + def test_create_dataset(self): # Setup Expected Response name = "name3373707" display_name = "displayName1615086568" @@ -150,42 +185,55 @@ def test_update_dataset(self): "etag": etag, } expected_response = dataset_pb2.Dataset(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_dataset", done=True + ) + operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1.AutoMlClient() # Setup Request + parent = client.location_path("[PROJECT]", "[LOCATION]") dataset = {} - update_mask = {} - response = client.update_dataset(dataset, update_mask) - assert expected_response == response + response = client.create_dataset(parent, dataset) + result = response.result() + assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask + expected_request = service_pb2.CreateDatasetRequest( + parent=parent, dataset=dataset ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_update_dataset_exception(self): + def 
test_create_dataset_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_dataset_exception", done=True + ) + operation.error.CopyFrom(error) + # Mock the API response - channel = ChannelStub(responses=[CustomException()]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1.AutoMlClient() - # Setup request + # Setup Request + parent = client.location_path("[PROJECT]", "[LOCATION]") dataset = {} - update_mask = {} - with pytest.raises(CustomException): - client.update_dataset(dataset, update_mask) + response = client.create_dataset(parent, dataset) + exception = response.exception() + assert exception.errors[0] == error def test_get_dataset(self): # Setup Expected Response @@ -278,55 +326,57 @@ def test_list_datasets_exception(self): with pytest.raises(CustomException): list(paged_list_response) - def test_delete_dataset(self): + def test_update_dataset(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_dataset", done=True - ) - operation.response.Pack(expected_response) + name = "name3373707" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = 
automl_v1.AutoMlClient() # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + dataset = {} + update_mask = {} - response = client.delete_dataset(name) - result = response.result() - assert expected_response == result + response = client.update_dataset(dataset, update_mask) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteDatasetRequest(name=name) + expected_request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_dataset_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_dataset_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_update_dataset_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1.AutoMlClient() - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + # Setup request + dataset = {} + update_mask = {} - response = client.delete_dataset(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.update_dataset(dataset, update_mask) def test_import_data(self): # Setup Expected Response @@ -591,56 +641,6 @@ def test_get_model_exception(self): with pytest.raises(CustomException): client.get_model(name) - def test_update_model(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "dataset_id": dataset_id, - 
"etag": etag, - } - expected_response = model_pb2.Model(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - model = {} - update_mask = {} - - response = client.update_model(model, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateModelRequest( - model=model, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_model_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - model = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_model(model, update_mask) - def test_list_models(self): # Setup Expected Response next_page_token = "" @@ -684,55 +684,55 @@ def test_list_models_exception(self): with pytest.raises(CustomException): list(paged_list_response) - def test_delete_model(self): + def test_update_model(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True - ) - operation.response.Pack(expected_response) + name = "name3373707" + display_name = "displayName1615086568" + dataset_id = "datasetId-2115646910" + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "dataset_id": dataset_id, + "etag": etag, + } + expected_response = model_pb2.Model(**expected_response) # Mock the API response - channel = 
ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + model = {} + update_mask = {} - response = client.delete_model(name) - result = response.result() - assert expected_response == result + response = client.update_model(model, update_mask) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) + expected_request = service_pb2.UpdateModelRequest( + model=model, update_mask=update_mask + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_update_model_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + model = {} + update_mask = {} - response = client.delete_model(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.update_model(model, update_mask) def test_deploy_model(self): # Setup Expected Response diff --git a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py index 702a3190..9438e361 100644 --- 
a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py +++ b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py @@ -71,197 +71,6 @@ class CustomException(Exception): class TestAutoMlClient(object): - def test_create_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - response = client.create_dataset(parent, dataset) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.CreateDatasetRequest( - parent=parent, dataset=dataset - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - with pytest.raises(CustomException): - client.create_dataset(parent, dataset) - - def test_update_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = 
"etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - dataset = {} - - response = client.update_dataset(dataset) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - dataset = {} - - with pytest.raises(CustomException): - client.update_dataset(dataset) - - def test_get_dataset(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name_2, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", 
"[LOCATION]", "[DATASET]") - - response = client.get_dataset(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - with pytest.raises(CustomException): - client.get_dataset(name) - - def test_list_datasets(self): - # Setup Expected Response - next_page_token = "" - datasets_element = {} - datasets = [datasets_element] - expected_response = {"next_page_token": next_page_token, "datasets": datasets} - expected_response = service_pb2.ListDatasetsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.datasets[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListDatasetsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_datasets_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - with pytest.raises(CustomException): - list(paged_list_response) - def test_delete_dataset(self): # Setup Expected Response expected_response = {} @@ -420,19 +229,12 @@ def test_export_data_exception(self): exception = response.exception() assert exception.errors[0] == error - def test_create_model(self): + def test_delete_model(self): # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - expected_response = { - "name": name, - "display_name": display_name, - "dataset_id": dataset_id, - } - expected_response = model_pb2.Model(**expected_response) + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) operation = operations_pb2.Operation( - name="operations/test_create_model", done=True + name="operations/test_delete_model", done=True ) operation.response.Pack(expected_response) @@ -444,23 +246,22 @@ def test_create_model(self): client = automl_v1beta1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - response = client.create_model(parent, model) + response = client.delete_model(name) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.CreateModelRequest(parent=parent, model=model) + expected_request = service_pb2.DeleteModelRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_model_exception(self): + def test_delete_model_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_create_model_exception", done=True + 
name="operations/test_delete_model_exception", done=True ) operation.error.CopyFrom(error) @@ -472,27 +273,23 @@ def test_create_model_exception(self): client = automl_v1beta1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - response = client.create_model(parent, model) + response = client.delete_model(name) exception = response.exception() assert exception.errors[0] == error - def test_get_model(self): + def test_export_model(self): # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - expected_response = { - "name": name_2, - "display_name": display_name, - "dataset_id": dataset_id, - } - expected_response = model_pb2.Model(**expected_response) + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_export_model", done=True + ) + operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel @@ -500,78 +297,48 @@ def test_get_model(self): # Setup Request name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} - response = client.get_model(name) - assert expected_response == response + response = client.export_model(name, output_config) + result = response.result() + assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelRequest(name=name) + expected_request = service_pb2.ExportModelRequest( + name=name, output_config=output_config + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_get_model_exception(self): - # 
Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - with pytest.raises(CustomException): - client.get_model(name) - - def test_list_models(self): - # Setup Expected Response - next_page_token = "" - model_element = {} - model = [model_element] - expected_response = {"next_page_token": next_page_token, "model": model} - expected_response = service_pb2.ListModelsResponse(**expected_response) + def test_export_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_export_model_exception", done=True + ) + operation.error.CopyFrom(error) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_models(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.model[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_models_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", 
"[LOCATION]") + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} - paged_list_response = client.list_models(parent) - with pytest.raises(CustomException): - list(paged_list_response) + response = client.export_model(name, output_config) + exception = response.exception() + assert exception.errors[0] == error - def test_delete_model(self): + def test_export_evaluated_examples(self): # Setup Expected Response expected_response = {} expected_response = empty_pb2.Empty(**expected_response) operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True + name="operations/test_export_evaluated_examples", done=True ) operation.response.Pack(expected_response) @@ -584,21 +351,24 @@ def test_delete_model(self): # Setup Request name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} - response = client.delete_model(name) + response = client.export_evaluated_examples(name, output_config) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) + expected_request = service_pb2.ExportEvaluatedExamplesRequest( + name=name, output_config=output_config + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_model_exception(self): + def test_export_evaluated_examples_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True + name="operations/test_export_evaluated_examples_exception", done=True ) operation.error.CopyFrom(error) @@ -611,124 +381,127 @@ def test_delete_model_exception(self): # Setup Request name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} - response = client.delete_model(name) + response = client.export_evaluated_examples(name, output_config) exception = response.exception() assert exception.errors[0] 
== error - def test_deploy_model(self): + def test_list_model_evaluations(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_deploy_model", done=True + next_page_token = "" + model_evaluation_element = {} + model_evaluation = [model_evaluation_element] + expected_response = { + "next_page_token": next_page_token, + "model_evaluation": model_evaluation, + } + expected_response = service_pb2.ListModelEvaluationsResponse( + **expected_response ) - operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - response = client.deploy_model(name) - result = response.result() - assert expected_response == result + paged_list_response = client.list_model_evaluations(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.model_evaluation[0] == resources[0] assert len(channel.requests) == 1 - expected_request = service_pb2.DeployModelRequest(name=name) + expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_deploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_deploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) + def test_list_model_evaluations_exception(self): + channel = 
ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - response = client.deploy_model(name) - exception = response.exception() - assert exception.errors[0] == error + paged_list_response = client.list_model_evaluations(parent) + with pytest.raises(CustomException): + list(paged_list_response) - def test_undeploy_model(self): + def test_create_dataset(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_undeploy_model", done=True - ) - operation.response.Pack(expected_response) + name = "name3373707" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + parent = client.location_path("[PROJECT]", "[LOCATION]") + dataset = {} - response = client.undeploy_model(name) - result = response.result() - assert expected_response == result + response = client.create_dataset(parent, dataset) + assert expected_response == response assert len(channel.requests) 
== 1 - expected_request = service_pb2.UndeployModelRequest(name=name) + expected_request = service_pb2.CreateDatasetRequest( + parent=parent, dataset=dataset + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_undeploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_undeploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_create_dataset_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + parent = client.location_path("[PROJECT]", "[LOCATION]") + dataset = {} - response = client.undeploy_model(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.create_dataset(parent, dataset) - def test_get_model_evaluation(self): + def test_get_dataset(self): # Setup Expected Response name_2 = "name2-1052831874" - annotation_spec_id = "annotationSpecId60690191" display_name = "displayName1615086568" - evaluated_example_count = 277565350 + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" expected_response = { "name": name_2, - "annotation_spec_id": annotation_spec_id, "display_name": display_name, - "evaluated_example_count": evaluated_example_count, + "description": description, + "example_count": example_count, + "etag": etag, } - expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response) + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) 
@@ -738,19 +511,17 @@ def test_get_model_evaluation(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - response = client.get_model_evaluation(name) + response = client.get_dataset(name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelEvaluationRequest(name=name) + expected_request = service_pb2.GetDatasetRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_get_model_evaluation_exception(self): + def test_get_dataset_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -759,133 +530,69 @@ def test_get_model_evaluation_exception(self): client = automl_v1beta1.AutoMlClient() # Setup request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") with pytest.raises(CustomException): - client.get_model_evaluation(name) + client.get_dataset(name) - def test_export_model(self): + def test_list_datasets(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_model", done=True - ) - operation.response.Pack(expected_response) + next_page_token = "" + datasets_element = {} + datasets = [datasets_element] + expected_response = {"next_page_token": next_page_token, "datasets": datasets} + expected_response = service_pb2.ListDatasetsResponse(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + parent = client.location_path("[PROJECT]", "[LOCATION]") - response = client.export_model(name, output_config) - result = response.result() - assert expected_response == result + paged_list_response = client.list_datasets(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.datasets[0] == resources[0] assert len(channel.requests) == 1 - expected_request = service_pb2.ExportModelRequest( - name=name, output_config=output_config - ) + expected_request = service_pb2.ListDatasetsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_export_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) + def test_list_datasets_exception(self): + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_model(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_export_evaluated_examples(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples", done=True - ) - 
operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_evaluated_examples(name, output_config) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_evaluated_examples_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + # Setup request + parent = client.location_path("[PROJECT]", "[LOCATION]") - response = client.export_evaluated_examples(name, output_config) - exception = response.exception() - assert exception.errors[0] == error + paged_list_response = client.list_datasets(parent) + with pytest.raises(CustomException): + list(paged_list_response) - def test_list_model_evaluations(self): + def test_update_dataset(self): # Setup Expected Response - next_page_token = "" - model_evaluation_element = {} - model_evaluation = [model_evaluation_element] + name = "name3373707" + display_name = "displayName1615086568" + 
description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" expected_response = { - "next_page_token": next_page_token, - "model_evaluation": model_evaluation, + "name": name, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, } - expected_response = service_pb2.ListModelEvaluationsResponse( - **expected_response - ) + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) @@ -895,20 +602,18 @@ def test_list_model_evaluations(self): client = automl_v1beta1.AutoMlClient() # Setup Request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - paged_list_response = client.list_model_evaluations(parent) - resources = list(paged_list_response) - assert len(resources) == 1 + dataset = {} - assert expected_response.model_evaluation[0] == resources[0] + response = client.update_dataset(dataset) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent) + expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_list_model_evaluations_exception(self): + def test_update_dataset_exception(self): + # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: @@ -916,11 +621,10 @@ def test_list_model_evaluations_exception(self): client = automl_v1beta1.AutoMlClient() # Setup request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + dataset = {} - paged_list_response = client.list_model_evaluations(parent) with pytest.raises(CustomException): - list(paged_list_response) + client.update_dataset(dataset) def test_get_annotation_spec(self): # Setup Expected Response @@ 
-1253,3 +957,299 @@ def test_update_column_spec_exception(self): with pytest.raises(CustomException): client.update_column_spec(column_spec) + + def test_create_model(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + dataset_id = "datasetId-2115646910" + expected_response = { + "name": name, + "display_name": display_name, + "dataset_id": dataset_id, + } + expected_response = model_pb2.Model(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + parent = client.location_path("[PROJECT]", "[LOCATION]") + model = {} + + response = client.create_model(parent, model) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.CreateModelRequest(parent=parent, model=model) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_model_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + parent = client.location_path("[PROJECT]", "[LOCATION]") + model = {} + + response = client.create_model(parent, model) + exception = response.exception() + assert exception.errors[0] == error + + def 
test_get_model(self): + # Setup Expected Response + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + dataset_id = "datasetId-2115646910" + expected_response = { + "name": name_2, + "display_name": display_name, + "dataset_id": dataset_id, + } + expected_response = model_pb2.Model(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.get_model(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = service_pb2.GetModelRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_model_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + with pytest.raises(CustomException): + client.get_model(name) + + def test_list_models(self): + # Setup Expected Response + next_page_token = "" + model_element = {} + model = [model_element] + expected_response = {"next_page_token": next_page_token, "model": model} + expected_response = service_pb2.ListModelsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + parent = client.location_path("[PROJECT]", 
"[LOCATION]") + + paged_list_response = client.list_models(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.model[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = service_pb2.ListModelsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_models_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup request + parent = client.location_path("[PROJECT]", "[LOCATION]") + + paged_list_response = client.list_models(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_deploy_model(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_deploy_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.deploy_model(name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.DeployModelRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_deploy_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_deploy_model_exception", done=True + ) + 
operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.deploy_model(name) + exception = response.exception() + assert exception.errors[0] == error + + def test_undeploy_model(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_undeploy_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.undeploy_model(name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.UndeployModelRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_undeploy_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_undeploy_model_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response 
= client.undeploy_model(name) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_model_evaluation(self): + # Setup Expected Response + name_2 = "name2-1052831874" + annotation_spec_id = "annotationSpecId60690191" + display_name = "displayName1615086568" + evaluated_example_count = 277565350 + expected_response = { + "name": name_2, + "annotation_spec_id": annotation_spec_id, + "display_name": display_name, + "evaluated_example_count": evaluated_example_count, + } + expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_evaluation_path( + "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" + ) + + response = client.get_model_evaluation(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = service_pb2.GetModelEvaluationRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_model_evaluation_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup request + name = client.model_evaluation_path( + "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" + ) + + with pytest.raises(CustomException): + client.get_model_evaluation(name) diff --git a/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py index 9b510f3a..c83504a4 100644 --- 
a/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py +++ b/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py @@ -128,14 +128,18 @@ def test_batch_predict(self): name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") input_config = {} output_config = {} + params = {} - response = client.batch_predict(name, input_config, output_config) + response = client.batch_predict(name, input_config, output_config, params) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = prediction_service_pb2.BatchPredictRequest( - name=name, input_config=input_config, output_config=output_config + name=name, + input_config=input_config, + output_config=output_config, + params=params, ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -159,7 +163,8 @@ def test_batch_predict_exception(self): name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") input_config = {} output_config = {} + params = {} - response = client.batch_predict(name, input_config, output_config) + response = client.batch_predict(name, input_config, output_config, params) exception = response.exception() assert exception.errors[0] == error From ea162ce8b7d9adaa922cb12457566cf623e0dd7d Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:35:24 -0700 Subject: [PATCH 2/9] Upgrade protoc-docs-plugin version to 0.7.0 to fix unstable docstrings. 
PiperOrigin-RevId: 312689208 Source-Author: Google APIs Source-Date: Thu May 21 10:00:47 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: dec3204175104cef49bf21d685d5517caaf0058f Source-Link: https://github.com/googleapis/googleapis/commit/dec3204175104cef49bf21d685d5517caaf0058f --- .../automl_v1/proto/annotation_payload_pb2.py | 5 +- .../automl_v1/proto/annotation_spec_pb2.py | 2 - .../automl_v1/proto/classification_pb2.py | 10 - .../cloud/automl_v1/proto/data_items_pb2.py | 16 +- google/cloud/automl_v1/proto/dataset_pb2.py | 7 +- google/cloud/automl_v1/proto/detection_pb2.py | 17 +- google/cloud/automl_v1/proto/geometry_pb2.py | 10 +- google/cloud/automl_v1/proto/image_pb2.py | 20 +- google/cloud/automl_v1/proto/io_pb2.py | 1877 +++++++---------- .../automl_v1/proto/model_evaluation_pb2.py | 2 - google/cloud/automl_v1/proto/model_pb2.py | 2 - .../cloud/automl_v1/proto/operations_pb2.py | 52 +- .../automl_v1/proto/prediction_service_pb2.py | 24 +- google/cloud/automl_v1/proto/service_pb2.py | 64 +- .../automl_v1/proto/text_extraction_pb2.py | 6 - google/cloud/automl_v1/proto/text_pb2.py | 18 +- .../cloud/automl_v1/proto/text_segment_pb2.py | 6 +- .../automl_v1/proto/text_sentiment_pb2.py | 4 - .../cloud/automl_v1/proto/translation_pb2.py | 8 - .../proto/annotation_payload_pb2.py | 5 +- .../proto/annotation_spec_pb2.py | 2 - .../proto/classification_pb2.py | 21 +- .../automl_v1beta1/proto/column_spec_pb2.py | 14 +- .../automl_v1beta1/proto/data_items_pb2.py | 23 +- .../automl_v1beta1/proto/data_stats_pb2.py | 34 +- .../automl_v1beta1/proto/data_types_pb2.py | 8 +- .../cloud/automl_v1beta1/proto/dataset_pb2.py | 7 +- .../automl_v1beta1/proto/detection_pb2.py | 28 +- .../automl_v1beta1/proto/geometry_pb2.py | 10 +- .../cloud/automl_v1beta1/proto/image_pb2.py | 20 +- google/cloud/automl_v1beta1/proto/io_pb2.py | 1269 +++++------ .../proto/model_evaluation_pb2.py | 2 - .../cloud/automl_v1beta1/proto/model_pb2.py | 2 - 
.../automl_v1beta1/proto/operations_pb2.py | 61 +- .../proto/prediction_service_pb2.py | 24 +- .../cloud/automl_v1beta1/proto/ranges_pb2.py | 2 - .../automl_v1beta1/proto/regression_pb2.py | 2 - .../cloud/automl_v1beta1/proto/service_pb2.py | 138 +- .../automl_v1beta1/proto/table_spec_pb2.py | 12 +- .../cloud/automl_v1beta1/proto/tables_pb2.py | 12 +- .../automl_v1beta1/proto/temporal_pb2.py | 2 - .../proto/text_extraction_pb2.py | 6 - google/cloud/automl_v1beta1/proto/text_pb2.py | 18 +- .../automl_v1beta1/proto/text_segment_pb2.py | 6 +- .../proto/text_sentiment_pb2.py | 4 - .../automl_v1beta1/proto/translation_pb2.py | 8 - .../cloud/automl_v1beta1/proto/video_pb2.py | 18 +- synth.metadata | 4 +- 48 files changed, 1452 insertions(+), 2460 deletions(-) diff --git a/google/cloud/automl_v1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1/proto/annotation_payload_pb2.py index 2455a7f5..e81ba3d7 100644 --- a/google/cloud/automl_v1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1/proto/annotation_payload_pb2.py @@ -267,10 +267,7 @@ { "DESCRIPTOR": _ANNOTATIONPAYLOAD, "__module__": "google.cloud.automl_v1.proto.annotation_payload_pb2", - "__doc__": """Contains annotation information that is relevant to - AutoML. - - + "__doc__": """Contains annotation information that is relevant to AutoML. Attributes: detail: Output only . Additional information about the annotation diff --git a/google/cloud/automl_v1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1/proto/annotation_spec_pb2.py index 67c91bc8..852243bb 100644 --- a/google/cloud/automl_v1/proto/annotation_spec_pb2.py +++ b/google/cloud/automl_v1/proto/annotation_spec_pb2.py @@ -113,8 +113,6 @@ "DESCRIPTOR": _ANNOTATIONSPEC, "__module__": "google.cloud.automl_v1.proto.annotation_spec_pb2", "__doc__": """A definition of an annotation spec. - - Attributes: name: Output only. Resource name of the annotation spec. 
Form: ‘pro diff --git a/google/cloud/automl_v1/proto/classification_pb2.py b/google/cloud/automl_v1/proto/classification_pb2.py index 1f5ea076..9e2acd51 100644 --- a/google/cloud/automl_v1/proto/classification_pb2.py +++ b/google/cloud/automl_v1/proto/classification_pb2.py @@ -644,8 +644,6 @@ "DESCRIPTOR": _CLASSIFICATIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Contains annotation details specific to classification. - - Attributes: score: Output only. A confidence estimate between 0.0 and 1.0. A @@ -670,8 +668,6 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Metrics for a single confidence threshold. - - Attributes: confidence_threshold: Output only. Metrics are computed with an assumption that the @@ -738,8 +734,6 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Output only. A row in the confusion matrix. - - Attributes: example_count: Output only. Value of the specific cell in the confusion @@ -755,8 +749,6 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Confusion matrix of the model running the classification. - - Attributes: annotation_spec_id: Output only. IDs of the annotation specs used in the confusion @@ -786,8 +778,6 @@ "__doc__": """Model evaluation metrics for classification problems. Note: For Video Classification this metrics only describe quality of the Video Classification predictions of “segment_classification” type. - - Attributes: au_prc: Output only. The Area Under Precision-Recall Curve metric. 
diff --git a/google/cloud/automl_v1/proto/data_items_pb2.py b/google/cloud/automl_v1/proto/data_items_pb2.py index 33b3356b..67fa7463 100644 --- a/google/cloud/automl_v1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1/proto/data_items_pb2.py @@ -695,10 +695,8 @@ { "DESCRIPTOR": _IMAGE, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", - "__doc__": """A representation of an image. Only images up to 30MB in - size are supported. - - + "__doc__": """A representation of an image. Only images up to 30MB in size are + supported. Attributes: data: Input only. The data representing the image. For Predict calls @@ -723,8 +721,6 @@ "DESCRIPTOR": _TEXTSNIPPET, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """A representation of a text snippet. - - Attributes: content: Required. The content of the text snippet as a string. Up to @@ -751,8 +747,6 @@ "DESCRIPTOR": _DOCUMENTDIMENSIONS, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """Message that describes dimension of a document. - - Attributes: unit: Unit of the dimension. @@ -779,8 +773,6 @@ "__doc__": """Describes the layout information of a [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the document. - - Attributes: text_segment: Text Segment that represents a segment in [document_text][goog @@ -808,8 +800,6 @@ "DESCRIPTOR": _DOCUMENT, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """A structured text document e.g. a PDF. - - Attributes: input_config: An input config specifying the content of the document. @@ -836,8 +826,6 @@ "DESCRIPTOR": _EXAMPLEPAYLOAD, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """Example data used for training or prediction. - - Attributes: payload: Required. The example data. 
diff --git a/google/cloud/automl_v1/proto/dataset_pb2.py b/google/cloud/automl_v1/proto/dataset_pb2.py index a79dd58a..8255aa56 100644 --- a/google/cloud/automl_v1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1/proto/dataset_pb2.py @@ -450,11 +450,8 @@ ), "DESCRIPTOR": _DATASET, "__module__": "google.cloud.automl_v1.proto.dataset_pb2", - "__doc__": """A workspace for solving a single, particular machine - learning (ML) problem. A workspace contains examples that may be - annotated. - - + "__doc__": """A workspace for solving a single, particular machine learning (ML) + problem. A workspace contains examples that may be annotated. Attributes: dataset_metadata: Required. The dataset metadata that is specific to the problem diff --git a/google/cloud/automl_v1/proto/detection_pb2.py b/google/cloud/automl_v1/proto/detection_pb2.py index a8312424..6d03a5ea 100644 --- a/google/cloud/automl_v1/proto/detection_pb2.py +++ b/google/cloud/automl_v1/proto/detection_pb2.py @@ -361,8 +361,6 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.detection_pb2", "__doc__": """Annotation details for image object detection. - - Attributes: bounding_box: Output only. The rectangle representing the object location. @@ -387,8 +385,6 @@ "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.detection_pb2", "__doc__": """Metrics for a single confidence threshold. - - Attributes: confidence_threshold: Output only. The confidence threshold value used to compute @@ -405,11 +401,8 @@ ), "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.detection_pb2", - "__doc__": """Bounding box matching model metrics for a single - intersection-over-union threshold and multiple label match confidence - thresholds. - - + "__doc__": """Bounding box matching model metrics for a single intersection-over- + union threshold and multiple label match confidence thresholds. 
Attributes: iou_threshold: Output only. The intersection-over-union threshold value used @@ -434,10 +427,8 @@ { "DESCRIPTOR": _IMAGEOBJECTDETECTIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.detection_pb2", - "__doc__": """Model evaluation metrics for image object detection - problems. Evaluates prediction quality of labeled bounding boxes. - - + "__doc__": """Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. Attributes: evaluated_bounding_box_count: Output only. The total number of bounding boxes (i.e. summed diff --git a/google/cloud/automl_v1/proto/geometry_pb2.py b/google/cloud/automl_v1/proto/geometry_pb2.py index 6cc88846..5858edb4 100644 --- a/google/cloud/automl_v1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1/proto/geometry_pb2.py @@ -132,8 +132,6 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.automl_v1.proto.geometry_pb2", "__doc__": """Required. Horizontal coordinate. - - Attributes: y: Required. Vertical coordinate. @@ -149,11 +147,9 @@ { "DESCRIPTOR": _BOUNDINGPOLY, "__module__": "google.cloud.automl_v1.proto.geometry_pb2", - "__doc__": """A bounding polygon of a detected object on a plane. On - output both vertices and normalized_vertices are provided. The polygon - is formed by connecting vertices in the order they are listed. - - + "__doc__": """A bounding polygon of a detected object on a plane. On output both + vertices and normalized_vertices are provided. The polygon is formed + by connecting vertices in the order they are listed. Attributes: normalized_vertices: Output only . The bounding polygon normalized vertices. 
diff --git a/google/cloud/automl_v1/proto/image_pb2.py b/google/cloud/automl_v1/proto/image_pb2.py index 46aea2e1..0b3e9589 100644 --- a/google/cloud/automl_v1/proto/image_pb2.py +++ b/google/cloud/automl_v1/proto/image_pb2.py @@ -483,8 +483,6 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Dataset metadata that is specific to image classification. - - Attributes: classification_type: Required. Type of the classification problem. @@ -500,9 +498,7 @@ { "DESCRIPTOR": _IMAGEOBJECTDETECTIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", - "__doc__": """Dataset metadata specific to image object detection. - - """, + "__doc__": """Dataset metadata specific to image object detection.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ImageObjectDetectionDatasetMetadata) }, ) @@ -515,8 +511,6 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Model metadata for image classification. - - Attributes: base_model_id: Optional. The ID of the ``base`` model. If it is specified, @@ -605,8 +599,6 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Model metadata specific to image object detection. - - Attributes: model_type: Optional. Type of the model. The available values are: \* @@ -677,10 +669,7 @@ { "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", - "__doc__": """Model deployment metadata specific to Image - Classification. - - + "__doc__": """Model deployment metadata specific to Image Classification. Attributes: node_count: Input only. The number of nodes to deploy the model on. 
A node @@ -700,10 +689,7 @@ { "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", - "__doc__": """Model deployment metadata specific to Image Object - Detection. - - + "__doc__": """Model deployment metadata specific to Image Object Detection. Attributes: node_count: Input only. The number of nodes to deploy the model on. A node diff --git a/google/cloud/automl_v1/proto/io_pb2.py b/google/cloud/automl_v1/proto/io_pb2.py index b7941101..db1213b2 100644 --- a/google/cloud/automl_v1/proto/io_pb2.py +++ b/google/cloud/automl_v1/proto/io_pb2.py @@ -619,625 +619,350 @@ "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action. - The format of input depends on dataset_metadata the Dataset into which the import is happening has. As input source the - [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is expected, - unless specified otherwise. Additionally any input .CSV file by itself - must be 100MB or smaller, unless specified otherwise. If an “example” - file (that is, image, video etc.) with identical content (even if it had - different ``GCS_FILE_PATH``) is mentioned multiple times, then its - label, bounding boxes etc. are appended. The same file should be always - provided with the same ``ML_USE`` and ``GCS_FILE_PATH``, if it is not, - then these values are nondeterministically selected from the given ones. - - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: - - - - - - See `Preparing your training - data `__ for more - information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH,LABEL,LABEL,... - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. 
This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of - up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, - .BMP, .TIFF, .ICO. - - - ``LABEL`` - A label that identifies the object in the image. - - For the ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed per image. If an image has not yet been labeled, then it should - be mentioned just once with no ``LABEL``. - - Some sample rows: - - :: - - TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - - - - - - See `Preparing your training - data `__ - for more information. - - A CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of - up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each - image is assumed to be exhaustively labeled. - - - ``LABEL`` - A label that identifies the object in the image specified - by the ``BOUNDING_BOX``. - - - ``BOUNDING BOX`` - The vertices of an object in the example image. 
- The minimum allowed ``BOUNDING_BOX`` edge length is 0.01, and no more - than 500 ``BOUNDING_BOX`` instances per image are allowed (one - ``BOUNDING_BOX`` per line). If an image has no looked for objects - then it should be mentioned just once with no LABEL and the “,,,,,,,” - in place of the ``BOUNDING_BOX``. - - **Four sample rows:** - - :: - - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - - - - - - - - - - See `Preparing your training - data `__ - for more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH - - For ``ML_USE``, do not use ``VALIDATE``. - - ``GCS_FILE_PATH`` is the path to another .csv file that describes - training example for a given ``ML_USE``, using the following row format: - - :: - - GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) - - Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to - 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is + expected, unless specified otherwise. Additionally any input .CSV file + by itself must be 100MB or smaller, unless specified otherwise. If an + “example” file (that is, image, video etc.) with identical content + (even if it had different ``GCS_FILE_PATH``) is mentioned multiple + times, then its label, bounding boxes etc. are appended. The same file + should be always provided with the same ``ML_USE`` and + ``GCS_FILE_PATH``, if it is not, then these values are + nondeterministically selected from the given ones. The formats are + represented in EBNF with commas being literal and with non-terminal + symbols defined near the end of this comment. The formats are: .. + raw:: html

AutoML Vision .. raw:: html

.. raw:: + html
.. raw:: html
+ .. raw:: html
Classification .. raw:: html
See + `Preparing your training data + `__ for more + information. CSV file(s) with each line in format: :: + ML_USE,GCS_FILE_PATH,LABEL,LABEL,... - ``ML_USE`` - Identifies the + data set that the current row (file) applies to. This value can be + one of the following: - ``TRAIN`` - Rows in this file are used to + train the model. - ``TEST`` - Rows in this file are used to test + the model during training. - ``UNASSIGNED`` - Rows in this + file are not categorized. They are Automatically divided into + train and test data. 80% for training and 20% for testing. - + ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of + up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, + .BMP, .TIFF, .ICO. - ``LABEL`` - A label that identifies the object + in the image. For the ``MULTICLASS`` classification type, at most one + ``LABEL`` is allowed per image. If an image has not yet been labeled, + then it should be mentioned just once with no ``LABEL``. Some sample + rows: :: TRAIN,gs://folder/image1.jpg,daisy + TEST,gs://folder/image2.jpg,dandelion,tulip,rose + UNASSIGNED,gs://folder/image3.jpg,daisy + UNASSIGNED,gs://folder/image4.jpg .. raw:: html
.. + raw:: html
.. raw:: html
Object Detection .. + raw:: html
See `Preparing your training data + `__ for more information. A CSV file(s) with + each line in format: :: + ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) - ``ML_USE`` - + Identifies the data set that the current row (file) applies to. + This value can be one of the following: - ``TRAIN`` - Rows in + this file are used to train the model. - ``TEST`` - Rows in this + file are used to test the model during training. - + ``UNASSIGNED`` - Rows in this file are not categorized. They are + Automatically divided into train and test data. 80% for training + and 20% for testing. - ``GCS_FILE_PATH`` - The Google Cloud Storage + location of an image of up to 30MB in size. Supported extensions: + .JPEG, .GIF, .PNG. Each image is assumed to be exhaustively + labeled. - ``LABEL`` - A label that identifies the object in the + image specified by the ``BOUNDING_BOX``. - ``BOUNDING BOX`` - The + vertices of an object in the example image. The minimum allowed + ``BOUNDING_BOX`` edge length is 0.01, and no more than 500 + ``BOUNDING_BOX`` instances per image are allowed (one + ``BOUNDING_BOX`` per line). If an image has no looked for objects + then it should be mentioned just once with no LABEL and the “,,,,,,,” + in place of the ``BOUNDING_BOX``. **Four sample rows:** :: + TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, + UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 + TEST,gs://folder/im3.png,,,,,,,,, .. raw:: html
.. + raw:: html
.. raw:: html

AutoML Video + Intelligence .. raw:: html

.. raw:: html
.. raw:: html
.. raw:: html +
Classification .. raw:: html
See `Preparing your + training data `__ for more information. CSV + file(s) with each line in format: :: ML_USE,GCS_FILE_PATH For + ``ML_USE``, do not use ``VALIDATE``. ``GCS_FILE_PATH`` is the path to + another .csv file that describes training example for a given + ``ML_USE``, using the following row format: :: + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) Here + ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to 3h + duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. Any - segment of a video which has one or more labels on it, is considered a - hard negative for all other labels. Any segment with no labels on it is - considered to be unknown. If a whole video is unknown, then it should be - mentioned just once with “,,” in place of - ``LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END``. - - Sample top level CSV file: - - :: - - TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv - - Sample rows of a CSV file for a particular ML_USE: - - :: - - gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 - gs://folder/vid3.avi,,, - - - - - - See `Preparing your training - data `__ for - more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH - - For ``ML_USE``, do not use ``VALIDATE``. - - ``GCS_FILE_PATH`` is the path to another .csv file that describes - training example for a given ``ML_USE``, using the following row format: - - :: - - GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX - - or - - :: - - GCS_FILE_PATH,,,,,,,,,, - - Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to - 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. Providing - ``INSTANCE_ID``\ s can help to obtain a better model. 
When a specific - labeled entity leaves the video frame, and shows up afterwards it is not - required, albeit preferable, that the same ``INSTANCE_ID`` is given to - it. - - ``TIMESTAMP`` must be within the length of the video, the - ``BOUNDING_BOX`` is assumed to be drawn on the closest video’s frame to - the ``TIMESTAMP``. Any mentioned by the ``TIMESTAMP`` frame is expected - to be exhaustively labeled and no more than 500 ``BOUNDING_BOX``-es per - frame are allowed. If a whole video is unknown, then it should be - mentioned just once with “,,,,,,,,,,” in place of - ``LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. - - Sample top level CSV file: - - :: - - TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv - - Seven sample rows of a CSV file for a particular ML_USE: - - :: - - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,, - - - - - - - - - - See `Preparing your training - data `__ for more - information. - - One or more CSV file(s) with each line in the following format: - - :: - - ML_USE,GCS_FILE_PATH - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing.. 
- - - ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored in - Google Cloud Storage that contains in-line text in-line as documents - for model training. - - After the training data set has been determined from the ``TRAIN`` and - ``UNASSIGNED`` CSV files, the training data is divided into train and - validation data sets. 70% for training and 30% for validation. - - For example: - - :: - - TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl - TEST,gs://folder/file3.jsonl - - **In-line JSONL files** - - In-line .JSONL files contain, per line, a JSON document that wraps a - [``text_snippet``][google.cloud.automl.v1.TextSnippet] field followed by - one or more [``annotations``][google.cloud.automl.v1.AnnotationPayload] - fields, which have ``display_name`` and ``text_extraction`` fields to - describe the entity from the text snippet. Multiple JSON documents can - be separated using line breaks (``\\n``). - - The supplied text must be annotated exhaustively. For example, if you - include the text “horse”, but do not label it as “animal”, then “horse” - is assumed to not be an “animal”. - - Any given text snippet content must have 30,000 characters or less, and - also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC encoded. - - For example: - - :: - - { - "text_snippet": { - "content": "dog car cat" - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 2} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 6} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 10} - } - } - ] - }\\n - { - "text_snippet": { - "content": "This dog is good." 
- }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 7} - } - } - ] - } - - **JSONL files that reference documents** - - .JSONL files contain, per line, a JSON document that wraps a - ``input_config`` that contains the path to a source document. Multiple - JSON documents can be separated using line breaks (``\\n``). - - Supported document extensions: .PDF, .TIF, .TIFF - - For example: - - :: - - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] - } - } - } - } - - **In-line JSONL files with document layout information** - + length of the video, and the end time must be after the start time. + Any segment of a video which has one or more labels on it, is + considered a hard negative for all other labels. Any segment with no + labels on it is considered to be unknown. If a whole video is unknown, + then it should be mentioned just once with “,,” in place of ``LABEL, + TIME_SEGMENT_START,TIME_SEGMENT_END``. Sample top level CSV file: :: + TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for + a particular ML_USE: :: gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, .. raw:: + html
.. raw:: html
.. raw:: html +
Object Tracking .. raw:: html
See `Preparing your + training data `__ for more information. CSV file(s) with each + line in format: :: ML_USE,GCS_FILE_PATH For ``ML_USE``, do not + use ``VALIDATE``. ``GCS_FILE_PATH`` is the path to another .csv file + that describes training example for a given ``ML_USE``, using the + following row format: :: + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or :: + GCS_FILE_PATH,,,,,,,,,, Here ``GCS_FILE_PATH`` leads to a video of up + to 50GB in size and up to 3h duration. Supported extensions: .MOV, + .MPEG4, .MP4, .AVI. Providing ``INSTANCE_ID``\ s can help to obtain a + better model. When a specific labeled entity leaves the video frame, + and shows up afterwards it is not required, albeit preferable, that + the same ``INSTANCE_ID`` is given to it. ``TIMESTAMP`` must be within + the length of the video, the ``BOUNDING_BOX`` is assumed to be drawn + on the closest video’s frame to the ``TIMESTAMP``. Any mentioned by + the ``TIMESTAMP`` frame is expected to be exhaustively labeled and no + more than 500 ``BOUNDING_BOX``-es per frame are allowed. If a whole + video is unknown, then it should be mentioned just once with + “,,,,,,,,,,” in place of ``LABEL, + [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. Sample top level CSV file: + :: TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV + file for a particular ML_USE: :: + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, .. raw:: html
.. + raw:: html
.. raw:: html

AutoML Natural Language + .. raw:: html

.. raw:: html
.. raw:: html
.. raw:: html
Entity + Extraction .. raw:: html
See `Preparing your training data + `__ for more + information. One or more CSV file(s) with each line in the following + format: :: ML_USE,GCS_FILE_PATH - ``ML_USE`` - Identifies the + data set that the current row (file) applies to. This value can be + one of the following: - ``TRAIN`` - Rows in this file are used to + train the model. - ``TEST`` - Rows in this file are used to test + the model during training. - ``UNASSIGNED`` - Rows in this + file are not categorized. They are Automatically divided into + train and test data. 80% for training and 20% for testing.. - + ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored in + Google Cloud Storage that contains in-line text in-line as documents + for model training. After the training data set has been determined + from the ``TRAIN`` and ``UNASSIGNED`` CSV files, the training data is + divided into train and validation data sets. 70% for training and 30% + for validation. For example: :: TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl + **In-line JSONL files** In-line .JSONL files contain, per line, a + JSON document that wraps a + [``text_snippet``][google.cloud.automl.v1.TextSnippet] field followed + by one or more + [``annotations``][google.cloud.automl.v1.AnnotationPayload] fields, + which have ``display_name`` and ``text_extraction`` fields to describe + the entity from the text snippet. Multiple JSON documents can be + separated using line breaks (``\\n``). The supplied text must + be annotated exhaustively. For example, if you include the text + “horse”, but do not label it as “animal”, then “horse” is assumed to + not be an “animal”. Any given text snippet content must have 30,000 + characters or less, and also be UTF-8 NFC encoded. ASCII is accepted + as it is UTF-8 NFC encoded. 
For example: :: { + "text_snippet": { "content": "dog car cat" }, + "annotations": [ { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": 0, + "end_offset": 2} } }, { + "display_name": "vehicle", "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 6} } + }, { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": 8, + "end_offset": 10} } } ] }\\n { + "text_snippet": { "content": "This dog is good." }, + "annotations": [ { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": 5, + "end_offset": 7} } } ] } **JSONL files + that reference documents** .JSONL files contain, per line, a JSON + document that wraps a ``input_config`` that contains the path to a + source document. Multiple JSON documents can be separated using line + breaks (``\\n``). Supported document extensions: .PDF, .TIF, + .TIFF For example: :: { "document": { + "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document1.pdf" ] } } } }\\n { + "document": { "input_config": { "gcs_source": { + "input_uris": [ "gs://folder/document2.tif" ] } } + } } **In-line JSONL files with document layout information** **Note:** You can only annotate documents using the UI. The format - described below applies to annotated documents exported using the UI or - ``exportData``. - - In-line .JSONL files for documents contain, per line, a JSON document - that wraps a ``document`` field that provides the textual content of the - document and the layout information. 
- - For example: - - :: - - { - "document": { - "document_text": { - "content": "dog car cat" - } - "layout": [ - { - "text_segment": { - "start_offset": 0, - "end_offset": 11, - }, - "page_number": 1, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ], - }, - "text_segment_type": TOKEN, - } - ], - "document_dimensions": { - "width": 8.27, - "height": 11.69, - "unit": INCH, - } - "page_count": 3, - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 3} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 7} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 11} - } - }, - ], - - - - - - See `Preparing your training - data `__ - for more information. - - One or more CSV file(s) with each line in the following format: - - :: - - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage file - path, that is, prefixed by “gs://”, it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes ("“), it is treated as a ``TEXT_SNIPPET``. 
For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example,”gs://folder/content.txt" - AutoML imports the file content as a text snippet. For - ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. - In both cases, size of the content must be 10MB or less in size. For - zip files, the size of each file inside the zip must be 10MB or less - in size. - - For the ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed. - - The ``ML_USE`` and ``LABEL`` columns are optional. Supported file - extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - - A maximum of 100 unique labels are allowed per CSV row. - - Sample rows: - - :: - - TRAIN,"They have bad food and very rude",RudeService,BadFood - gs://folder/content.txt,SlowService - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,BadFood - - - - - - See `Preparing your training - data `__ - for more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage file - path, that is, prefixed by “gs://”, it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes ("“), it is treated as a ``TEXT_SNIPPET``. For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example,”gs://folder/content.txt" - AutoML imports the file content as a text snippet. 
For - ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. - In both cases, size of the content must be 128kB or less in size. For - zip files, the size of each file inside the zip must be 128kB or less - in size. - - The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported file - extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - - - ``SENTIMENT`` - An integer between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). - Describes the ordinal of the sentiment - higher value means a more - positive sentiment. All the values are completely relative, - i.e. neither 0 needs to mean a negative or neutral sentiment nor - sentiment_max needs to mean a positive one - it is just required that - 0 is the least positive sentiment in the data, and sentiment_max is - the most positive one. The SENTIMENT shouldn’t be confused with - “score” or “magnitude” from the previous Natural Language Sentiment - Analysis API. All SENTIMENT values between 0 and sentiment_max must - be represented in the imported data. On prediction the same 0 to - sentiment_max range will be used. The difference between neighboring - sentiment values needs not to be uniform, e.g. 1 and 2 may be similar - whereas the difference between 2 and 3 may be large. - - Sample rows: - - :: - - TRAIN,"@freewrytin this is way too good for your product",2 - gs://folder/content.txt,3 - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,2 - - - - - - - - - - **For bigquery_source:** - - An URI of a BigQuery table. The user data size of the BigQuery table - must be 100GB or smaller. - - An imported table must have between 2 and 1,000 columns, inclusive, and - between 1000 and 100,000,000 rows, inclusive. There are at most 5 import - data running in parallel. - - - - - - **Input field definitions:** - - ``ML_USE`` - (“TRAIN” \| “VALIDATE” \| “TEST” \| “UNASSIGNED”) Describes how the - given example (file) should be used for model training. 
“UNASSIGNED” - can be used when user has no preference. - ``GCS_FILE_PATH`` - The path to a file on Google Cloud Storage. For example, - “gs://folder/image1.png”. - ``LABEL`` - A display name of an object on an image, video etc., e.g. “dog”. Must - be up to 32 characters long and can consist only of ASCII Latin - letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For each - label an AnnotationSpec is created which display_name becomes the - label; AnnotationSpecs are given back in predictions. - ``INSTANCE_ID`` - A positive integer that identifies a specific instance of a labeled - entity on an example. Used e.g. to track two cars on a video while - being able to tell apart which one is which. - ``BOUNDING_BOX`` - (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A - rectangle parallel to the frame of the example (image, video). If 4 - vertices are given they are connected by edges in the order provided, - if 2 are given they are recognized as diagonally opposite vertices of - the rectangle. - ``VERTEX`` - (``COORDINATE,COORDINATE``) First coordinate is horizontal (x), the - second is vertical (y). - ``COORDINATE`` - A float in 0 to 1 range, relative to total length of image or video - in given dimension. For fractions the leading non-decimal 0 can be - omitted (i.e. 0.3 = .3). Point 0,0 is in top left. - ``TIME_SEGMENT_START`` - (``TIME_OFFSET``) Expresses a beginning, inclusive, of a time segment - within an example that has a time dimension (e.g. video). - ``TIME_SEGMENT_END`` - (``TIME_OFFSET``) Expresses an end, exclusive, of a time segment - within n example that has a time dimension (e.g. video). - ``TIME_OFFSET`` - A number of seconds as measured from the start of an example - (e.g. video). Fractions are allowed, up to a microsecond precision. - “inf” is allowed, and it means the end of the example. - ``TEXT_SNIPPET`` - The content of a text snippet, UTF-8 encoded, enclosed within double - quotes (""). 
- ``DOCUMENT`` - A field that provides the textual content with document and the - layout information. - - **Errors:** - - If any of the provided CSV files can’t be parsed or if more than certain - percent of CSV rows cannot be processed then the operation fails and - nothing is imported. Regardless of overall success or failure the - per-row failures, up to a certain count cap, is listed in + described below applies to annotated documents exported using the UI + or ``exportData``. In-line .JSONL files for documents contain, per + line, a JSON document that wraps a ``document`` field that provides + the textual content of the document and the layout information. For + example: :: { "document": { "document_text": { + "content": "dog car cat" } "layout": [ + { "text_segment": { + "start_offset": 0, "end_offset": 11, + }, "page_number": 1, + "bounding_poly": { "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, + ], }, "text_segment_type": + TOKEN, } ], + "document_dimensions": { "width": 8.27, + "height": 11.69, "unit": INCH, } + "page_count": 3, }, "annotations": [ + { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": + 0, "end_offset": 3} } }, { + "display_name": "vehicle", "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 7} } + }, { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": + 8, "end_offset": 11} } }, ], + .. raw:: html
.. raw:: html
.. raw:: + html
Classification .. raw:: html
See `Preparing + your training data `__ for more information. One or more + CSV file(s) with each line in the following format: :: + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - ``ML_USE`` - + Identifies the data set that the current row (file) applies to. + This value can be one of the following: - ``TRAIN`` - Rows in + this file are used to train the model. - ``TEST`` - Rows in this + file are used to test the model during training. - + ``UNASSIGNED`` - Rows in this file are not categorized. They are + Automatically divided into train and test data. 80% for training + and 20% for testing. - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are + distinguished by a pattern. If the column content is a valid Google + Cloud Storage file path, that is, prefixed by “gs://”, it is + treated as a ``GCS_FILE_PATH``. Otherwise, if the content is + enclosed in double quotes ("“), it is treated as a + ``TEXT_SNIPPET``. For ``GCS_FILE_PATH``, the path must lead to a + file with supported extension and UTF-8 encoding, for + example,”gs://folder/content.txt" AutoML imports the file content + as a text snippet. For ``TEXT_SNIPPET``, AutoML imports the column + content excluding quotes. In both cases, size of the content must + be 10MB or less in size. For zip files, the size of each file + inside the zip must be 10MB or less in size. For the + ``MULTICLASS`` classification type, at most one ``LABEL`` is + allowed. The ``ML_USE`` and ``LABEL`` columns are optional. + Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP A maximum + of 100 unique labels are allowed per CSV row. Sample rows: :: + TRAIN,"They have bad food and very rude",RudeService,BadFood + gs://folder/content.txt,SlowService TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,BadFood .. raw:: html +
.. raw:: html
.. raw:: html
+ Sentiment Analysis .. raw:: html
See `Preparing your + training data `__ for more information. CSV file(s) + with each line in format: :: ML_USE,(TEXT_SNIPPET | + GCS_FILE_PATH),SENTIMENT - ``ML_USE`` - Identifies the data set that + the current row (file) applies to. This value can be one of the + following: - ``TRAIN`` - Rows in this file are used to train the + model. - ``TEST`` - Rows in this file are used to test the model + during training. - ``UNASSIGNED`` - Rows in this file are + not categorized. They are Automatically divided into train and + test data. 80% for training and 20% for testing. - + ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a + pattern. If the column content is a valid Google Cloud Storage file + path, that is, prefixed by “gs://”, it is treated as a + ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double + quotes ("“), it is treated as a ``TEXT_SNIPPET``. For + ``GCS_FILE_PATH``, the path must lead to a file with supported + extension and UTF-8 encoding, for example,”gs://folder/content.txt" + AutoML imports the file content as a text snippet. For + ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. + In both cases, size of the content must be 128kB or less in size. For + zip files, the size of each file inside the zip must be 128kB or less + in size. The ``ML_USE`` and ``SENTIMENT`` columns are optional. + Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - + ``SENTIMENT`` - An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). + Describes the ordinal of the sentiment - higher value means a more + positive sentiment. All the values are completely relative, + i.e. neither 0 needs to mean a negative or neutral sentiment nor + sentiment_max needs to mean a positive one - it is just required that + 0 is the least positive sentiment in the data, and sentiment_max is + the most positive one. 
The SENTIMENT shouldn’t be confused with + “score” or “magnitude” from the previous Natural Language Sentiment + Analysis API. All SENTIMENT values between 0 and sentiment_max must + be represented in the imported data. On prediction the same 0 to + sentiment_max range will be used. The difference between neighboring + sentiment values needs not to be uniform, e.g. 1 and 2 may be similar + whereas the difference between 2 and 3 may be large. Sample rows: :: + TRAIN,"@freewrytin this is way too good for your product",2 + gs://folder/content.txt,3 TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,2 .. raw:: html
+ .. raw:: html
.. raw:: html

AutoML Tables .. + raw:: html

.. raw:: html
.. raw:: html
See `Preparing + your training data `__ for more information. You can use either + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or + [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. + All input is concatenated into a single [primary_table_spec_id][googl + e.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id] **For + gcs_source:** CSV file(s), where the first row of the first file is + the header, containing unique column names. If the first row of a + subsequent file is the same as the header, then it is also treated as + a header. All other rows contain values for the corresponding columns. + Each .CSV file by itself must be 10GB or smaller, and their total size + must be 100GB or smaller. First three sample rows of a CSV file: .. + raw:: html
    "Id","First Name","Last
+  Name","Dob","Addresses"     "1","John","Doe","1968-01-22","[{"status":
+  "current","address":"123_First_Avenue","city":"Seattle","state":"WA","
+  zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_
+  Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYea
+  rs":"5"}]"     "2","Jane","Doe","1980-10-16","[{"status":"current","ad
+  dress":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","nu
+  mberOfYears":"2"},{"status":"previous","address":"321_Main_Street","ci
+  ty":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+  
**For bigquery_source:** An URI of a BigQuery table. The user + data size of the BigQuery table must be 100GB or smaller. An imported + table must have between 2 and 1,000 columns, inclusive, and between + 1000 and 100,000,000 rows, inclusive. There are at most 5 import data + running in parallel. .. raw:: html
.. raw:: html +
**Input field definitions:** ``ML_USE`` (“TRAIN” \| + “VALIDATE” \| “TEST” \| “UNASSIGNED”) Describes how the given + example (file) should be used for model training. “UNASSIGNED” can + be used when user has no preference. ``GCS_FILE_PATH`` The path to + a file on Google Cloud Storage. For example, + “gs://folder/image1.png”. ``LABEL`` A display name of an object on + an image, video etc., e.g. “dog”. Must be up to 32 characters long + and can consist only of ASCII Latin letters A-Z and a-z, + underscores(_), and ASCII digits 0-9. For each label an + AnnotationSpec is created which display_name becomes the label; + AnnotationSpecs are given back in predictions. ``INSTANCE_ID`` A + positive integer that identifies a specific instance of a labeled + entity on an example. Used e.g. to track two cars on a video while + being able to tell apart which one is which. ``BOUNDING_BOX`` + (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A + rectangle parallel to the frame of the example (image, video). If 4 + vertices are given they are connected by edges in the order provided, + if 2 are given they are recognized as diagonally opposite vertices of + the rectangle. ``VERTEX`` (``COORDINATE,COORDINATE``) First + coordinate is horizontal (x), the second is vertical (y). + ``COORDINATE`` A float in 0 to 1 range, relative to total length of + image or video in given dimension. For fractions the leading non- + decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top left. + ``TIME_SEGMENT_START`` (``TIME_OFFSET``) Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). ``TIME_SEGMENT_END`` (``TIME_OFFSET``) + Expresses an end, exclusive, of a time segment within n example + that has a time dimension (e.g. video). ``TIME_OFFSET`` A number of + seconds as measured from the start of an example (e.g. video). + Fractions are allowed, up to a microsecond precision. 
“inf” is + allowed, and it means the end of the example. ``TEXT_SNIPPET`` The + content of a text snippet, UTF-8 encoded, enclosed within double + quotes (""). ``DOCUMENT`` A field that provides the textual content + with document and the layout information. **Errors:** If any of + the provided CSV files can’t be parsed or if more than certain percent + of CSV rows cannot be processed then the operation fails and nothing + is imported. Regardless of overall success or failure the per-row + failures, up to a certain count cap, is listed in Operation.metadata.partial_failures. - - Attributes: source: The source of the input. @@ -1249,281 +974,167 @@ params: Additional domain-specific parameters describing the semantic of the imported data, any string must be up to 25000 - characters long. - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - The Google Cloud Storage location of an image of up to 30MB in size. - Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the ID - in the batch predict output. - - Sample rows: - - :: - - gs://folder/image1.jpeg - gs://folder/image2.gif - gs://folder/image3.png - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - The Google Cloud Storage location of an image of up to 30MB in size. - Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the ID - in the batch predict output. - - Sample rows: - - :: - - gs://folder/image1.jpeg - gs://folder/image2.gif - gs://folder/image3.png - - - - - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of video up to - 50GB in size and up to 3h in duration duration. Supported extensions: - .MOV, .MPEG4, .MP4, .AVI. - - ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. 
- - Sample rows: - - :: - - gs://folder/video1.mp4,10,40 - gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of video up to - 50GB in size and up to 3h in duration duration. Supported extensions: - .MOV, .MPEG4, .MP4, .AVI. - - ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. - - Sample rows: - - :: - - gs://folder/video1.mp4,10,40 - gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - - - - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF, .TIF, .TIFF - - Text files can be no larger than 10MB in size. - - Sample rows: - - :: - - gs://folder/text1.txt - gs://folder/text2.pdf - gs://folder/text3.tif - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - + characters long. .. raw:: html

AutoML Tables .. + raw:: html

``schema_inference_version`` + (integer) This value must be supplied. The version of the + algorithm to use for the initial inference of the column + data types of the imported table. Allowed values: “1”. + """, + # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.InputConfig) + }, +) +_sym_db.RegisterMessage(InputConfig) +_sym_db.RegisterMessage(InputConfig.ParamsEntry) + +BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( + "BatchPredictInputConfig", + (_message.Message,), + { + "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Input configuration for BatchPredict Action. The format of input + depends on the ML problem of the model used for prediction. As input + source the [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] + is expected, unless specified otherwise. The formats are represented + in EBNF with commas being literal and with non-terminal symbols + defined near the end of this comment. The formats are: .. raw:: html +

AutoML Vision .. raw:: html

.. raw:: html
.. raw:: html
.. raw:: html +
Classification .. raw:: html
One or more CSV files + where each line is a single column: :: GCS_FILE_PATH The Google + Cloud Storage location of an image of up to 30MB in size. Supported + extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the + batch predict output. Sample rows: :: gs://folder/image1.jpeg + gs://folder/image2.gif gs://folder/image3.png .. raw:: html +
.. raw:: html
.. raw:: html
+ Object Detection .. raw:: html
One or more CSV files where + each line is a single column: :: GCS_FILE_PATH The Google Cloud + Storage location of an image of up to 30MB in size. Supported + extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the + batch predict output. Sample rows: :: gs://folder/image1.jpeg + gs://folder/image2.gif gs://folder/image3.png .. raw:: html +
.. raw:: html
.. raw:: html

AutoML + Video Intelligence .. raw:: html

.. raw:: html
.. raw:: html
.. raw:: html +
Classification .. raw:: html
One or more CSV files + where each line is a single column: :: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END ``GCS_FILE_PATH`` + is the Google Cloud Storage location of video up to 50GB in size and + up to 3h in duration duration. Supported extensions: .MOV, .MPEG4, + .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be + within the length of the video, and the end time must be after the + start time. Sample rows: :: gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf .. raw:: + html
.. raw:: html
.. raw:: html +
Object Tracking .. raw:: html
One or more CSV files + where each line is a single column: :: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END ``GCS_FILE_PATH`` + is the Google Cloud Storage location of video up to 50GB in size and + up to 3h in duration duration. Supported extensions: .MOV, .MPEG4, + .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be + within the length of the video, and the end time must be after the + start time. Sample rows: :: gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf .. raw:: + html
.. raw:: html
.. raw:: html

+ AutoML Natural Language .. raw:: html

.. raw:: html +
.. raw:: html
.. raw:: + html
Classification .. raw:: html
One or more + CSV files where each line is a single column: :: GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF, .TIF, .TIFF - - Text files can be no larger than 128kB in size. - - Sample rows: - - :: - - gs://folder/text1.txt - gs://folder/text2.pdf - gs://folder/text3.tif - - - - - - One or more JSONL (JSON Lines) files that either provide inline text or - documents. You can only use one format, either inline text or documents, - for a single call to [AutoMl.BatchPredict]. - - Each JSONL file contains a per line a proto that wraps a temporary - user-assigned TextSnippet ID (string up to 2000 characters long) called - “id”, a TextSnippet proto (in JSON representation) and zero or more + Supported file extensions: .TXT, .PDF, .TIF, .TIFF Text files can be + no larger than 10MB in size. Sample rows: :: + gs://folder/text1.txt gs://folder/text2.pdf + gs://folder/text3.tif .. raw:: html
.. raw:: html +
.. raw:: html
Sentiment Analysis .. raw:: html +
One or more CSV files where each line is a single column: :: + GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location + of a text file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF + Text files can be no larger than 128kB in size. Sample rows: :: + gs://folder/text1.txt gs://folder/text2.pdf + gs://folder/text3.tif .. raw:: html
.. raw:: html +
.. raw:: html
Entity Extraction .. raw:: html +
One or more JSONL (JSON Lines) files that either provide inline + text or documents. You can only use one format, either inline text or + documents, for a single call to [AutoMl.BatchPredict]. Each JSONL + file contains a per line a proto that wraps a temporary user-assigned + TextSnippet ID (string up to 2000 characters long) called “id”, a + TextSnippet proto (in JSON representation) and zero or more TextFeature protos. Any given text snippet content must have 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII already is). - The IDs provided should be unique. - - Each document JSONL file contains, per line, a proto that wraps a - Document proto with ``input_config`` set. Each document cannot exceed - 2MB in size. - - Supported document extensions: .PDF, .TIF, .TIFF - - Each JSONL file must not exceed 100MB in size, and no more than 20 JSONL - files may be passed. - - Sample inline JSONL file (Shown with artificial line breaks. Actual line - breaks are denoted by “``\\n``”.): - - :: - - { - "id": "my_first_id", - "text_snippet": { "content": "dog car cat"}, - "text_features": [ - { - "text_segment": {"start_offset": 4, "end_offset": 6}, - "structural_type": PARAGRAPH, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ] - }, - } - ], - }\\n - { - "id": "2", - "text_snippet": { - "content": "Extended sample content", - "mime_type": "text/plain" - } - } - - Sample document JSONL file (Shown with artificial line breaks. Actual - line breaks are denoted by “``\\n``”.): - - :: - - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] - } - } - } - } - - - - - - - - - - **For bigquery_source:** - - The URI of a BigQuery table. The user data size of the BigQuery table - must be 100GB or smaller. 
- - The column names must contain the model’s - - [input_feature_column_specs’][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] - [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] (order - doesn’t matter). The columns corresponding to the model’s input feature - column specs must contain values compatible with the column spec’s data - types. Prediction on all the rows of the table will be attempted. - - - - - - **Input field definitions:** - - ``GCS_FILE_PATH`` - The path to a file on Google Cloud Storage. For example, - “gs://folder/video.avi”. - ``TIME_SEGMENT_START`` - (``TIME_OFFSET``) Expresses a beginning, inclusive, of a time segment - within an example that has a time dimension (e.g. video). - ``TIME_SEGMENT_END`` - (``TIME_OFFSET``) Expresses an end, exclusive, of a time segment - within n example that has a time dimension (e.g. video). - ``TIME_OFFSET`` - A number of seconds as measured from the start of an example - (e.g. video). Fractions are allowed, up to a microsecond precision. - “inf” is allowed, and it means the end of the example. - - **Errors:** - - If any of the provided CSV files can’t be parsed or if more than certain - percent of CSV rows cannot be processed then the operation fails and - prediction does not happen. Regardless of overall success or failure the - per-row failures, up to a certain count cap, will be listed in + The IDs provided should be unique. Each document JSONL file contains, + per line, a proto that wraps a Document proto with ``input_config`` + set. Each document cannot exceed 2MB in size. Supported document + extensions: .PDF, .TIF, .TIFF Each JSONL file must not exceed 100MB + in size, and no more than 20 JSONL files may be passed. Sample inline + JSONL file (Shown with artificial line breaks. 
Actual line breaks are + denoted by “``\\n``”.): :: { "id": "my_first_id", + "text_snippet": { "content": "dog car cat"}, "text_features": [ + { "text_segment": {"start_offset": 4, "end_offset": 6}, + "structural_type": PARAGRAPH, "bounding_poly": { + "normalized_vertices": [ {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, ] }, } ], + }\\n { "id": "2", "text_snippet": { "content": + "Extended sample content", "mime_type": "text/plain" } + } Sample document JSONL file (Shown with artificial line breaks. + Actual line breaks are denoted by “``\\n``”.): :: { + "document": { "input_config": { "gcs_source": { + "input_uris": [ "gs://folder/document1.pdf" ] } } + } }\\n { "document": { "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } } } } .. raw:: html
.. raw:: html +
.. raw:: html

AutoML Tables .. raw:: html

+ .. raw:: html
.. raw:: html +
See `Preparing your training data + `__ for + more information. You can use either [gcs_source][google.cloud.automl + .v1.BatchPredictInputConfig.gcs_source] or + [bigquery_source][BatchPredictInputConfig.bigquery_source]. **For + gcs_source:** CSV file(s), each by itself 10GB or smaller and total + size must be 100GB or smaller, where first file must have a header + containing column names. If the first row of a subsequent file is the + same as the header, then it is also treated as a header. All other + rows contain values for the corresponding columns. The column names + must contain the model’s [input_feature_column_specs’][google.cloud.a + utoml.v1.TablesModelMetadata.input_feature_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input + feature column specs must contain values compatible with the column + spec’s data types. Prediction on all the rows, i.e. the CSV lines, + will be attempted. Sample rows from a CSV file: .. raw:: html +
    "First Name","Last Name","Dob","Addresses"     "John","Doe","
+  1968-01-22","[{"status":"current","address":"123_First_Avenue","city":
+  "Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"p
+  revious","address":"456_Main_Street","city":"Portland","state":"OR","z
+  ip":"22222","numberOfYears":"5"}]"     "Jane","Doe","1980-10-16","[{"s
+  tatus":"current","address":"789_Any_Avenue","city":"Albany","state":"N
+  Y","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"
+  321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOf
+  Years":"3"}]"    
**For bigquery_source:** The URI of a + BigQuery table. The user data size of the BigQuery table must be 100GB + or smaller. The column names must contain the model’s [input_feature + _column_specs’][google.cloud.automl.v1.TablesModelMetadata.input_featu + re_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input + feature column specs must contain values compatible with the column + spec’s data types. Prediction on all the rows of the table will be + attempted. .. raw:: html
.. raw:: html
+ **Input field definitions:** ``GCS_FILE_PATH`` The path to a file + on Google Cloud Storage. For example, “gs://folder/video.avi”. + ``TIME_SEGMENT_START`` (``TIME_OFFSET``) Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). ``TIME_SEGMENT_END`` (``TIME_OFFSET``) + Expresses an end, exclusive, of a time segment within n example + that has a time dimension (e.g. video). ``TIME_OFFSET`` A number of + seconds as measured from the start of an example (e.g. video). + Fractions are allowed, up to a microsecond precision. “inf” is + allowed, and it means the end of the example. **Errors:** If any of + the provided CSV files can’t be parsed or if more than certain percent + of CSV rows cannot be processed then the operation fails and + prediction does not happen. Regardless of overall success or failure + the per-row failures, up to a certain count cap, will be listed in Operation.metadata.partial_failures. - - Attributes: source: The source of the input. @@ -1542,10 +1153,7 @@ { "DESCRIPTOR": _DOCUMENTINPUTCONFIG, "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """Input configuration of a - [Document][google.cloud.automl.v1.Document]. - - + "__doc__": """Input configuration of a [Document][google.cloud.automl.v1.Document]. Attributes: gcs_source: The Google Cloud Storage location of the document file. Only a @@ -1563,33 +1171,26 @@ { "DESCRIPTOR": _OUTPUTCONFIG, "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """\* For Translation: CSV file ``translation.csv``, with - each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV - file which describes examples that have given ML_USE, using the - following row format per line: TEXT_SNIPPET (in source language) - \\tTEXT_SNIPPET (in target language) - - - For Tables: Output depends on whether the dataset was imported from - Google Cloud Storage or BigQuery. 
Google Cloud Storage case: - - [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] - must be set. Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,…,\ ``tables_N.csv`` with each having as header line - the table’s column names, and all other lines contain values for the - header columns. BigQuery case: - - [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - - ``export_data__`` - where will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in + "__doc__": """\* For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which + describes examples that have given ML_USE, using the following row + format per line: TEXT_SNIPPET (in source language) :raw-latex:`\t + `TEXT_SNIPPET (in target language) - For Tables: Output depends on + whether the dataset was imported from Google Cloud Storage or + BigQuery. Google Cloud Storage case: [gcs_destination][google.cloud.a + utoml.v1p1beta.OutputConfig.gcs_destination] must be set. Exported are + CSV file(s) ``tables_1.csv``, ``tables_2.csv``,…,\ ``tables_N.csv`` + with each having as header line the table’s column names, and all + other lines contain values for the header columns. BigQuery case: [bi + gquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery + _destination] pointing to a BigQuery project must be set. In the given + project a new dataset will be created with name + ``export_data__`` where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. 
In that dataset a new table called ``primary_table`` will be created, and filled with precisely the same data as this obtained on import. - - Attributes: destination: The destination of the output. @@ -1613,277 +1214,219 @@ { "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """Output configuration for BatchPredict Action. - - As destination the - - [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination] - must be set unless specified otherwise for a domain. If gcs_destination - is set then in the given directory a new directory is created. Its name - will be “prediction--”, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. The contents of it depends on the ML problem the - predictions are made for. - - - For Image Classification: In the created directory files - ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. A single image - will be listed only once with all its annotations, and its - annotations will never be split across files. Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - image’s “ID” : “” followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image failed - (partially or completely), then an additional ``errors_1.jsonl``, - ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same “ID” : “” but - here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. 
- - - For Image Object Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,…,\ ``image_object_detection_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. Each .JSONL file - will contain, per line, a JSON representation of a proto that wraps - image’s “ID” : “” followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - image_object_detection detail populated. A single image will be - listed only once with all its annotations, and its annotations will - never be split across files. If prediction for any image failed - (partially or completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same “ID” : “” but - here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. \* For Video - Classification: In the created directory a video_classification.csv - file, and a .JSON file per each video classification requested in the - input (i.e. each line in given CSV(s)), will be created. - - :: - - The format of video_classification.csv is: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to - 1 the prediction input lines (i.e. video_classification.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = “OK” - if prediction completed successfully, or an error code with message + "__doc__": """Output configuration for BatchPredict Action. 
As destination the [gc + s_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_des + tination] must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory is + created. Its name will be “prediction--”, where timestamp is in YYYY- + MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the + ML problem the predictions are made for. - For Image Classification: + In the created directory files ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted images and annotations. A single image + will be listed only once with all its annotations, and its + annotations will never be split across files. Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + image’s “ID” : “” followed by a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image failed + (partially or completely), then an additional ``errors_1.jsonl``, + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N + depends on total number of failed predictions). These files will have + a JSON representation of a proto that wraps the same “ID” : “” but + here followed by exactly one ```google.rpc.Status`` `__ + containing only ``code`` and ``message``\ fields. - For Image Object + Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,…,\ + ``image_object_detection_N.jsonl`` will be created, where N may be + 1, and depends on the total number of the successfully predicted + images and annotations. 
Each .JSONL file will contain, per line, a + JSON representation of a proto that wraps image’s “ID” : “” + followed by a list of zero or more AnnotationPayload protos (called + annotations), which have image_object_detection detail populated. A + single image will be listed only once with all its annotations, and + its annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + the same “ID” : “” but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``\ fields. \* For Video Classification: In the created + directory a video_classification.csv file, and a .JSON file per each + video classification requested in the input (i.e. each line in given + CSV(s)), will be created. :: The format of + video_classification.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SE + GMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_classification.csv has precisely + the same number of lines as the prediction input had.) JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for the video time segment the file is assigned to in the - video_classification.csv. 
All AnnotationPayload protos will have - video_classification field set, and will be sorted by - video_classification.type field (note that the returned types are - governed by `classifaction_types` parameter in - [PredictService.BatchPredictRequest.params][]). - - - For Video Object Tracking: In the created directory a - video_object_tracking.csv file will be created, and multiple files - video_object_trackinng_1.json, video_object_trackinng_2.json,…, - video_object_trackinng_N.json, where N is the number of requests in - the input (i.e. the number of lines in given CSV(s)). - - :: - - The format of video_object_tracking.csv is: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to - 1 the prediction input lines (i.e. video_object_tracking.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = “OK” - if prediction completed successfully, or an error code with message + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for the video time segment + the file is assigned to in the video_classification.csv. All + AnnotationPayload protos will have video_classification field + set, and will be sorted by video_classification.type field + (note that the returned types are governed by + `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). - For Video Object + Tracking: In the created directory a video_object_tracking.csv file + will be created, and multiple files video_object_trackinng_1.json, + video_object_trackinng_2.json,…, video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number of + lines in given CSV(s)). 
:: The format of + video_object_tracking.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_S + EGMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_object_tracking.csv has precisely + the same number of lines as the prediction input had.) JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for each frame of the video time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - - - For Text Classification: In the created directory files - ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text file (or document) in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text file (or - document) will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any input file (or document) failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). 
These files will have a JSON representation of a - proto that wraps input file followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text file (or document) in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text file (or - document) will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any input file (or document) failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input file followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. The contents of these .JSONL file(s) depend on - whether the input used inline text, or documents. If input was - inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet’s - “id” (if specified), followed by input text snippet, and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_extraction detail populated. 
A single text snippet will be - listed only once with all its annotations, and its annotations will - never be split across files. If input used documents, then each - .JSONL file will contain, per line, a JSON representation of a proto - that wraps given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by a - list of zero or more AnnotationPayload protos (called annotations), - which have text_extraction detail populated and refer, via their - indices, to the OCR-ed text snippet. A single document (and its text - snippet) will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for any - text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - either the “id” : “” (in case of inline) or the document proto (in - case of document) but here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Tables: Output depends on whether - - [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] - or - - [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] - is set (either is allowed). Google Cloud Storage case: In the created - directory files ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for each frame of the video + time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. 
- For Text Classification: In the + created directory files ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` will be created, where N may be 1, and depends on the total number of - the successfully predicted rows. For all CLASSIFICATION - - [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + inputs and annotations found. :: Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + input text file (or document) in the text snippet (or document) + proto and a list of zero or more AnnotationPayload protos + (called annotations), which have classification detail + populated. A single text file (or document) will be listed only + once with all its annotations, and its annotations will never be + split across files. If prediction for any input file (or + document) failed (partially or completely), then additional + `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` files + will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Text Sentiment: In the created directory files + ``text_sentiment_1.jsonl``, ``text_sentiment_2.jsonl``,…,\ + ``text_sentiment_N.jsonl`` will be created, where N may be 1, and + depends on the total number of inputs and annotations found. :: + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in the text + snippet (or document) proto and a list of zero or more + AnnotationPayload protos (called annotations), which have + text_sentiment detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. 
If prediction for + any input file (or document) failed (partially or completely), + then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Text Extraction: In the created directory files + ``text_extraction_1.jsonl``, ``text_extraction_2.jsonl``,…,\ + ``text_extraction_N.jsonl`` will be created, where N may be 1, and + depends on the total number of inputs and annotations found. The + contents of these .JSONL file(s) depend on whether the input used + inline text, or documents. If input was inline, then each .JSONL + file will contain, per line, a JSON representation of a proto that + wraps given in request text snippet’s “id” (if specified), followed + by input text snippet, and a list of zero or more AnnotationPayload + protos (called annotations), which have text_extraction detail + populated. A single text snippet will be listed only once with all + its annotations, and its annotations will never be split across + files. If input used documents, then each .JSONL file will contain, + per line, a JSON representation of a proto that wraps given in + request document proto, followed by its OCR-ed representation in + the form of a text snippet, finally followed by a list of zero or + more AnnotationPayload protos (called annotations), which have + text_extraction detail populated and refer, via their indices, to + the OCR-ed text snippet. A single document (and its text snippet) + will be listed only once with all its annotations, and its + annotations will never be split across files. 
If prediction for any + text snippet failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + either the “id” : “” (in case of inline) or the document proto (in + case of document) but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Tables: Output depends on whether [gcs_destinati + on][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destinat + ion] or [bigquery_destination][google.cloud.automl.v1p1beta.BatchPred + ictOutputConfig.bigquery_destination] is set (either is allowed). + Google Cloud Storage case: In the created directory files + ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` will be + created, where N may be 1, and depends on the total number of the + successfully predicted rows. For all CLASSIFICATION [prediction_type- + s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: Each .csv file will contain a header, listing all columns’ - [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] - given on input followed by M target column names in the format of - - "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>\_\_score" - where M is the number of distinct target values, i.e. number of distinct - values in the target column of the table used to train the model. - Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, columns having the - corresponding prediction + given on input followed by M target column names in the format of "<[ + target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata. 
+ target_column_spec] [display_name][google.cloud.automl.v1p1beta.Colum + nSpec.display_name]>\_\_score" where M is the number of distinct + target values, i.e. number of distinct values in the target column of + the table used to train the model. Subsequent lines will contain the + respective values of successfully predicted rows, with the last, + i.e. the target, columns having the corresponding prediction [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For - REGRESSION and FORECASTING - - [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns’ + REGRESSION and FORECASTING [prediction_type-s][google.cloud.automl.v1 + p1beta.TablesModelMetadata.prediction_type]: Each .csv file will + contain a header, listing all columns’ [display_name-s][google.cloud.automl.v1p1beta.display_name] given on - input followed by the predicted target column with name in the format of - - "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - + input followed by the predicted target column with name in the format + of "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.Tab + lesModelMetadata.target_column_spec] [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" Subsequent lines will contain the respective values of successfully predicted rows, with the last, i.e. the target, column having the predicted target value. If prediction for any rows failed, then an additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will - be created (N depends on total number of failed rows). These files will - have analogous format as ``tables_*.csv``, but always with a single - target column having - - ```google.rpc.Status`` `__ + be created (N depends on total number of failed rows). 
These files + will have analogous format as ``tables_*.csv``, but always with a + single target column having ```google.rpc.Status`` `__ represented as a JSON string, and containing only ``code`` and - ``message``. BigQuery case: - - [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - ``prediction__`` where - will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in - YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset two - tables will be created, ``predictions``, and ``errors``. The - ``predictions`` table’s column names will be the input columns’ - + ``message``. BigQuery case: [bigquery_destination][google.cloud.autom + l.v1p1beta.OutputConfig.bigquery_destination] pointing to a BigQuery + project must be set. In the given project a new dataset will be + created with name ``prediction__`` where will be made BigQuery-dataset-name compatible + (e.g. most special characters will become underscores), and timestamp + will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the + dataset two tables will be created, ``predictions``, and ``errors``. 
+ The ``predictions`` table’s column names will be the input columns’ [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] - followed by the target column with name in the format of - - "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - + followed by the target column with name in the format of "predicted_< + [target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata + .target_column_spec] [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" The input feature columns will contain the respective values of successfully predicted rows, with the target column having an ARRAY of - [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. The ``errors`` table contains rows for which the prediction has failed, it has analogous input columns while the target column name is in the - format of - - "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", - and as a value has - - ```google.rpc.Status`` `__ - represented as a STRUCT, and containing only ``code`` and ``message``. - - + format of "errors_<[target_column_specs][google.cloud.automl.v1p1beta + .TablesModelMetadata.target_column_spec] [display_name][google.cloud. + automl.v1p1beta.ColumnSpec.display_name]>", and as a value has + ```google.rpc.Status`` `__ represented as a STRUCT, and + containing only ``code`` and ``message``. Attributes: destination: The destination of the output. @@ -1912,8 +1455,6 @@ "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """Output configuration for ModelExport Action. - - Attributes: destination: The destination of the output. 
@@ -1972,8 +1513,6 @@ "DESCRIPTOR": _GCSSOURCE, "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """The Google Cloud Storage location for the input content. - - Attributes: input_uris: Required. Google Cloud Storage URIs to input files, up to 2000 @@ -1991,10 +1530,8 @@ { "DESCRIPTOR": _GCSDESTINATION, "__module__": "google.cloud.automl_v1.proto.io_pb2", - "__doc__": """The Google Cloud Storage location where the output is to - be written to. - - + "__doc__": """The Google Cloud Storage location where the output is to be written + to. Attributes: output_uri_prefix: Required. Google Cloud Storage URI to output directory, up to diff --git a/google/cloud/automl_v1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1/proto/model_evaluation_pb2.py index 4e6f2967..fac2cb32 100644 --- a/google/cloud/automl_v1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1/proto/model_evaluation_pb2.py @@ -327,8 +327,6 @@ "DESCRIPTOR": _MODELEVALUATION, "__module__": "google.cloud.automl_v1.proto.model_evaluation_pb2", "__doc__": """Evaluation results of a model. - - Attributes: metrics: Output only. Problem type specific evaluation metrics. diff --git a/google/cloud/automl_v1/proto/model_pb2.py b/google/cloud/automl_v1/proto/model_pb2.py index 63a66a6d..f329a36e 100644 --- a/google/cloud/automl_v1/proto/model_pb2.py +++ b/google/cloud/automl_v1/proto/model_pb2.py @@ -502,8 +502,6 @@ "DESCRIPTOR": _MODEL, "__module__": "google.cloud.automl_v1.proto.model_pb2", "__doc__": """API proto representing a trained machine learning model. - - Attributes: model_metadata: Required. 
The model metadata that is specific to the problem diff --git a/google/cloud/automl_v1/proto/operations_pb2.py b/google/cloud/automl_v1/proto/operations_pb2.py index 37c1fa6b..33df92f9 100644 --- a/google/cloud/automl_v1/proto/operations_pb2.py +++ b/google/cloud/automl_v1/proto/operations_pb2.py @@ -857,10 +857,8 @@ { "DESCRIPTOR": _OPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Metadata used across all long running operations returned - by AutoML API. - - + "__doc__": """Metadata used across all long running operations returned by AutoML + API. Attributes: details: Ouptut only. Details of specific operation. Even if this field @@ -909,10 +907,7 @@ { "DESCRIPTOR": _DELETEOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Details of operations that perform deletes of any - entities. - - """, + "__doc__": """Details of operations that perform deletes of any entities.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteOperationMetadata) }, ) @@ -924,9 +919,7 @@ { "DESCRIPTOR": _DEPLOYMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Details of DeployModel operation. - - """, + "__doc__": """Details of DeployModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeployModelOperationMetadata) }, ) @@ -938,9 +931,7 @@ { "DESCRIPTOR": _UNDEPLOYMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Details of UndeployModel operation. - - """, + "__doc__": """Details of UndeployModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UndeployModelOperationMetadata) }, ) @@ -952,9 +943,7 @@ { "DESCRIPTOR": _CREATEDATASETOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Details of CreateDataset operation. 
- - """, + "__doc__": """Details of CreateDataset operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.CreateDatasetOperationMetadata) }, ) @@ -966,9 +955,7 @@ { "DESCRIPTOR": _CREATEMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Details of CreateModel operation. - - """, + "__doc__": """Details of CreateModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.CreateModelOperationMetadata) }, ) @@ -980,9 +967,7 @@ { "DESCRIPTOR": _IMPORTDATAOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Details of ImportData operation. - - """, + "__doc__": """Details of ImportData operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ImportDataOperationMetadata) }, ) @@ -1000,8 +985,6 @@ "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Further describes this export data’s output. Supplements [OutputConfig][google.cloud.automl.v1.OutputConfig]. - - Attributes: output_location: The output location to which the exported data is written. @@ -1015,8 +998,6 @@ "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Details of ExportData operation. - - Attributes: output_info: Output only. Information further describing this export data’s @@ -1038,11 +1019,8 @@ { "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Further describes this batch predict’s output. Supplements - - [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig]. - - + "__doc__": """Further describes this batch predict’s output. Supplements [BatchPred + ictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig]. Attributes: output_location: The output location into which prediction output is written. 
@@ -1056,8 +1034,6 @@ "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Details of BatchPredict operation. - - Attributes: input_config: Output only. The input config that was given upon starting @@ -1082,10 +1058,8 @@ { "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, "__module__": "google.cloud.automl_v1.proto.operations_pb2", - "__doc__": """Further describes the output of model export. Supplements - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - + "__doc__": """Further describes the output of model export. Supplements [ModelExport + OutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. Attributes: gcs_output_directory: The full path of the Google Cloud Storage directory created, @@ -1097,8 +1071,6 @@ "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Details of ExportModel operation. - - Attributes: output_info: Output only. Information further describing the output of this diff --git a/google/cloud/automl_v1/proto/prediction_service_pb2.py b/google/cloud/automl_v1/proto/prediction_service_pb2.py index c7a138eb..d5a5f3f1 100644 --- a/google/cloud/automl_v1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1/proto/prediction_service_pb2.py @@ -618,10 +618,8 @@ ), "DESCRIPTOR": _PREDICTREQUEST, "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", - "__doc__": """Request message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - - + "__doc__": """Request message for [PredictionService.Predict][google.cloud.automl.v1 + .PredictionService.Predict]. Attributes: name: Required. Name of the model requested to serve the prediction. 
@@ -669,10 +667,8 @@ ), "DESCRIPTOR": _PREDICTRESPONSE, "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", - "__doc__": """Response message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - - + "__doc__": """Response message for [PredictionService.Predict][google.cloud.automl.v + 1.PredictionService.Predict]. Attributes: payload: Prediction result. AutoML Translation and AutoML Natural @@ -720,10 +716,8 @@ ), "DESCRIPTOR": _BATCHPREDICTREQUEST, "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", - "__doc__": """Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - - + "__doc__": """Request message for [PredictionService.BatchPredict][google.cloud.auto + ml.v1.PredictionService.BatchPredict]. Attributes: name: Required. Name of the model requested to serve the batch @@ -813,10 +807,8 @@ "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", "__doc__": """Result of the Batch Predict. This message is returned in [response][google.longrunning.Operation.response] of the operation - returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - - + returned by the [PredictionService.BatchPredict][google.cloud.automl.v + 1.PredictionService.BatchPredict]. Attributes: metadata: Additional domain-specific prediction response metadata. diff --git a/google/cloud/automl_v1/proto/service_pb2.py b/google/cloud/automl_v1/proto/service_pb2.py index 112edb18..b01609e8 100644 --- a/google/cloud/automl_v1/proto/service_pb2.py +++ b/google/cloud/automl_v1/proto/service_pb2.py @@ -1376,8 +1376,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. - - Attributes: parent: Required. 
The resource name of the project to create the @@ -1398,8 +1396,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. - - Attributes: name: Required. The resource name of the dataset to retrieve. @@ -1417,8 +1413,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Attributes: parent: Required. The resource name of the project from which to list @@ -1453,8 +1447,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Attributes: datasets: The datasets read. @@ -1476,8 +1468,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - - Attributes: dataset: Required. The dataset which replaces the resource on the @@ -1498,8 +1488,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - - Attributes: name: Required. The resource name of the dataset to delete. @@ -1517,8 +1505,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - - Attributes: name: Required. Dataset name. Dataset must already exist. All @@ -1540,8 +1526,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - - Attributes: name: Required. The resource name of the dataset. 
@@ -1559,10 +1543,8 @@ { "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. - - + "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1. + AutoMl.GetAnnotationSpec]. Attributes: name: Required. The resource name of the annotation spec to @@ -1581,8 +1563,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - - Attributes: parent: Required. Resource name of the parent project where the model @@ -1603,8 +1583,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - - Attributes: name: Required. Resource name of the model. @@ -1622,8 +1600,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Attributes: parent: Required. Resource name of the project, from which to list the @@ -1659,8 +1635,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Attributes: model: List of models in the requested page. @@ -1682,8 +1656,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - - Attributes: name: Required. Resource name of the model being deleted. @@ -1701,8 +1673,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - - Attributes: model: Required. The model which replaces the resource on the server. 
@@ -1722,8 +1692,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - - Attributes: model_deployment_metadata: The per-domain specific deployment parameters. @@ -1747,8 +1715,6 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - - Attributes: name: Required. Resource name of the model to undeploy. @@ -1765,11 +1731,9 @@ "DESCRIPTOR": _EXPORTMODELREQUEST, "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models - need to be enabled for exporting, otherwise an error code will be - returned. - - + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an error code will + be returned. Attributes: name: Required. The resource name of the model to export. @@ -1787,10 +1751,8 @@ { "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - - + "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 + .AutoMl.GetModelEvaluation]. Attributes: name: Required. Resource name for the model evaluation. @@ -1806,10 +1768,8 @@ { "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - - + "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. + v1.AutoMl.ListModelEvaluations]. Attributes: parent: Required. 
Resource name of the model to list the model @@ -1845,10 +1805,8 @@ { "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, "__module__": "google.cloud.automl_v1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - - + "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl + .v1.AutoMl.ListModelEvaluations]. Attributes: model_evaluation: List of model evaluations in the requested page. diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py index a93f26f8..c23e83ba 100644 --- a/google/cloud/automl_v1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1/proto/text_extraction_pb2.py @@ -274,8 +274,6 @@ "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.text_extraction_pb2", "__doc__": """Annotation for identifying spans of text. - - Attributes: annotation: Required. Text extraction annotations can either be a text @@ -304,8 +302,6 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.text_extraction_pb2", "__doc__": """Metrics for a single confidence threshold. - - Attributes: confidence_threshold: Output only. The confidence threshold value used to compute @@ -324,8 +320,6 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.text_extraction_pb2", "__doc__": """Model evaluation metrics for text extraction problems. - - Attributes: au_prc: Output only. The Area under precision recall curve metric. 
diff --git a/google/cloud/automl_v1/proto/text_pb2.py b/google/cloud/automl_v1/proto/text_pb2.py index 97b06621..5e1c6a9e 100644 --- a/google/cloud/automl_v1/proto/text_pb2.py +++ b/google/cloud/automl_v1/proto/text_pb2.py @@ -244,8 +244,6 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", "__doc__": """Dataset metadata for classification. - - Attributes: classification_type: Required. Type of the classification problem. @@ -262,8 +260,6 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", "__doc__": """Model metadata that is specific to text classification. - - Attributes: classification_type: Output only. Classification type of the dataset used to train @@ -280,9 +276,7 @@ { "DESCRIPTOR": _TEXTEXTRACTIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", - "__doc__": """Dataset metadata that is specific to text extraction - - """, + "__doc__": """Dataset metadata that is specific to text extraction""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.TextExtractionDatasetMetadata) }, ) @@ -294,9 +288,7 @@ { "DESCRIPTOR": _TEXTEXTRACTIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text extraction. - - """, + "__doc__": """Model metadata that is specific to text extraction.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.TextExtractionModelMetadata) }, ) @@ -309,8 +301,6 @@ "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", "__doc__": """Dataset metadata for text sentiment. - - Attributes: sentiment_max: Required. A sentiment is expressed as an integer ordinal, @@ -331,9 +321,7 @@ { "DESCRIPTOR": _TEXTSENTIMENTMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text sentiment. 
- - """, + "__doc__": """Model metadata that is specific to text sentiment.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.TextSentimentModelMetadata) }, ) diff --git a/google/cloud/automl_v1/proto/text_segment_pb2.py b/google/cloud/automl_v1/proto/text_segment_pb2.py index 65bdda40..b377d4fd 100644 --- a/google/cloud/automl_v1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1/proto/text_segment_pb2.py @@ -108,10 +108,8 @@ { "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.automl_v1.proto.text_segment_pb2", - "__doc__": """A contiguous part of a text (string), assuming it has an - UTF-8 NFC encoding. - - + "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC + encoding. Attributes: content: Output only. The content of the TextSegment. diff --git a/google/cloud/automl_v1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1/proto/text_sentiment_pb2.py index b7fdb5e0..af81f6b9 100644 --- a/google/cloud/automl_v1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1/proto/text_sentiment_pb2.py @@ -252,8 +252,6 @@ "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, "__module__": "google.cloud.automl_v1.proto.text_sentiment_pb2", "__doc__": """Contains annotation details specific to text sentiment. - - Attributes: sentiment: Output only. The sentiment with the semantic, as given to the @@ -283,8 +281,6 @@ "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.text_sentiment_pb2", "__doc__": """Model evaluation metrics for text sentiment problems. - - Attributes: precision: Output only. Precision. 
diff --git a/google/cloud/automl_v1/proto/translation_pb2.py b/google/cloud/automl_v1/proto/translation_pb2.py index 6d7e8c2d..aff44011 100644 --- a/google/cloud/automl_v1/proto/translation_pb2.py +++ b/google/cloud/automl_v1/proto/translation_pb2.py @@ -282,8 +282,6 @@ "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Dataset metadata that is specific to translation. - - Attributes: source_language_code: Required. The BCP-47 language code of the source language. @@ -302,8 +300,6 @@ "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Evaluation metrics for the dataset. - - Attributes: bleu_score: Output only. BLEU score. @@ -322,8 +318,6 @@ "DESCRIPTOR": _TRANSLATIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Model metadata that is specific to translation. - - Attributes: base_model: The resource name of the model to use as a baseline to train @@ -349,8 +343,6 @@ "DESCRIPTOR": _TRANSLATIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Annotation details specific to translation. - - Attributes: translated_content: Output only . The translated content. diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py index 9843fde7..ca20a260 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py @@ -358,10 +358,7 @@ { "DESCRIPTOR": _ANNOTATIONPAYLOAD, "__module__": "google.cloud.automl_v1beta1.proto.annotation_payload_pb2", - "__doc__": """Contains annotation information that is relevant to - AutoML. - - + "__doc__": """Contains annotation information that is relevant to AutoML. Attributes: detail: Output only . 
Additional information about the annotation diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py index fe7c6669..a7dca153 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py @@ -113,8 +113,6 @@ "DESCRIPTOR": _ANNOTATIONSPEC, "__module__": "google.cloud.automl_v1beta1.proto.annotation_spec_pb2", "__doc__": """A definition of an annotation spec. - - Attributes: name: Output only. Resource name of the annotation spec. Form: ‘pro diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2.py b/google/cloud/automl_v1beta1/proto/classification_pb2.py index 7b123416..9b526f1e 100644 --- a/google/cloud/automl_v1beta1/proto/classification_pb2.py +++ b/google/cloud/automl_v1beta1/proto/classification_pb2.py @@ -754,8 +754,6 @@ "DESCRIPTOR": _CLASSIFICATIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Contains annotation details specific to classification. - - Attributes: score: Output only. A confidence estimate between 0.0 and 1.0. A @@ -775,10 +773,7 @@ { "DESCRIPTOR": _VIDEOCLASSIFICATIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Contains annotation details specific to video - classification. - - + "__doc__": """Contains annotation details specific to video classification. Attributes: type: Output only. Expresses the type of video classification. @@ -825,8 +820,6 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Metrics for a single confidence threshold. - - Attributes: confidence_threshold: Output only. 
Metrics are computed with an assumption that the @@ -893,8 +886,6 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Output only. A row in the confusion matrix. - - Attributes: example_count: Output only. Value of the specific cell in the confusion @@ -910,8 +901,6 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Confusion matrix of the model running the classification. - - Attributes: annotation_spec_id: Output only. IDs of the annotation specs used in the confusion @@ -938,11 +927,9 @@ ), "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", - "__doc__": """Model evaluation metrics for classification problems. - Note: For Video Classification this metrics only describe quality of the - Video Classification predictions of “segment_classification” type. - - + "__doc__": """Model evaluation metrics for classification problems. Note: For Video + Classification this metrics only describe quality of the Video + Classification predictions of “segment_classification” type. Attributes: au_prc: Output only. The Area Under Precision-Recall Curve metric. diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py index d4c075f2..f13fb533 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py @@ -253,10 +253,8 @@ { "DESCRIPTOR": _COLUMNSPEC_CORRELATEDCOLUMN, "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", - "__doc__": """Identifies the table’s column, and its correlation with - the column this ColumnSpec describes. - - + "__doc__": """Identifies the table’s column, and its correlation with the column + this ColumnSpec describes. 
Attributes: column_spec_id: The column_spec_id of the correlated column, which belongs to @@ -269,11 +267,9 @@ ), "DESCRIPTOR": _COLUMNSPEC, "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", - "__doc__": """A representation of a column in a relational table. When - listing them, column specs are returned in the same order in which they - were given on import . Used by: \* Tables - - + "__doc__": """A representation of a column in a relational table. When listing them, + column specs are returned in the same order in which they were given + on import . Used by: \* Tables Attributes: name: Output only. The resource name of the column specs. Form: ``p diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py index ff6524d8..e0770ccd 100644 --- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_items_pb2.py @@ -806,10 +806,8 @@ { "DESCRIPTOR": _IMAGE, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """A representation of an image. Only images up to 30MB in - size are supported. - - + "__doc__": """A representation of an image. Only images up to 30MB in size are + supported. Attributes: data: Input only. The data representing the image. For Predict calls @@ -840,8 +838,6 @@ "DESCRIPTOR": _TEXTSNIPPET, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A representation of a text snippet. - - Attributes: content: Required. The content of the text snippet as a string. Up to @@ -868,8 +864,6 @@ "DESCRIPTOR": _DOCUMENTDIMENSIONS, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """Message that describes dimension of a document. - - Attributes: unit: Unit of the dimension. 
@@ -893,11 +887,8 @@ { "DESCRIPTOR": _DOCUMENT_LAYOUT, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", - "__doc__": """Describes the layout information of a - [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - in the document. - - + "__doc__": """Describes the layout information of a [text_segment][google.cloud.auto + ml.v1beta1.Document.Layout.text_segment] in the document. Attributes: text_segment: Text Segment that represents a segment in [document_text][goog @@ -925,8 +916,6 @@ "DESCRIPTOR": _DOCUMENT, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A structured text document e.g. a PDF. - - Attributes: input_config: An input config specifying the content of the document. @@ -953,8 +942,6 @@ "DESCRIPTOR": _ROW, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A representation of a row in a relational table. - - Attributes: column_spec_ids: The resource IDs of the column specs describing the columns of @@ -983,8 +970,6 @@ "DESCRIPTOR": _EXAMPLEPAYLOAD, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """Example data used for training or prediction. - - Attributes: payload: Required. Input only. The example data. diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py index e6f9535c..ac493e73 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py @@ -1022,10 +1022,8 @@ { "DESCRIPTOR": _DATASTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """The data statistics of a series of values that share the - same DataType. - - + "__doc__": """The data statistics of a series of values that share the same + DataType. Attributes: stats: The data statistics specific to a DataType. 
@@ -1064,8 +1062,6 @@ "DESCRIPTOR": _FLOAT64STATS_HISTOGRAMBUCKET, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """A bucket of a histogram. - - Attributes: min: The minimum value of the bucket, inclusive. @@ -1082,8 +1078,6 @@ "DESCRIPTOR": _FLOAT64STATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of FLOAT64 values. - - Attributes: mean: The mean of the series. @@ -1119,8 +1113,6 @@ "DESCRIPTOR": _STRINGSTATS_UNIGRAMSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The statistics of a unigram. - - Attributes: value: The unigram. @@ -1133,8 +1125,6 @@ "DESCRIPTOR": _STRINGSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of STRING values. - - Attributes: top_unigram_stats: The statistics of the top 20 unigrams, ordered by [count][goog @@ -1166,8 +1156,6 @@ "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """Stats split by a defined in context granularity. - - Attributes: buckets: A map from granularity key to example count for that key. E.g. @@ -1189,8 +1177,6 @@ "DESCRIPTOR": _TIMESTAMPSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of TIMESTAMP values. - - Attributes: granular_stats: The string key is the pre-defined granularity. Currently @@ -1214,8 +1200,6 @@ "DESCRIPTOR": _ARRAYSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of ARRAY values. - - Attributes: member_stats: Stats of all the values of all arrays, as if they were a @@ -1243,8 +1227,6 @@ "DESCRIPTOR": _STRUCTSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of STRUCT values. 
- - Attributes: field_stats: Map from a field name of the struct to data stats aggregated @@ -1267,8 +1249,6 @@ "DESCRIPTOR": _CATEGORYSTATS_SINGLECATEGORYSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The statistics of a single CATEGORY value. - - Attributes: value: The CATEGORY value. @@ -1281,8 +1261,6 @@ "DESCRIPTOR": _CATEGORYSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of CATEGORY values. - - Attributes: top_category_stats: The statistics of the top 20 CATEGORY values, ordered by [cou @@ -1301,11 +1279,9 @@ { "DESCRIPTOR": _CORRELATIONSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", - "__doc__": """A correlation statistics between two series of DataType - values. The series may have differing DataType-s, but within a single - series the DataType must be the same. - - + "__doc__": """A correlation statistics between two series of DataType values. The + series may have differing DataType-s, but within a single series the + DataType must be the same. Attributes: cramers_v: The correlation value using the Cramer’s V measure. diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2.py b/google/cloud/automl_v1beta1/proto/data_types_pb2.py index d87b6e8a..adc5227d 100644 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_types_pb2.py @@ -322,10 +322,8 @@ { "DESCRIPTOR": _DATATYPE, "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", - "__doc__": """Indicated the type of data that can be stored in a - structured data entity (e.g. a table). - - + "__doc__": """Indicated the type of data that can be stored in a structured data + entity (e.g. a table). Attributes: details: Details of DataType-s that need additional specification. 
@@ -378,8 +376,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", "__doc__": """\ ``StructType`` defines the DataType-s of a [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. - - Attributes: fields: Unordered map of struct field names to their data types. diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2.py b/google/cloud/automl_v1beta1/proto/dataset_pb2.py index acc0a85c..0b2de618 100644 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1beta1/proto/dataset_pb2.py @@ -460,11 +460,8 @@ { "DESCRIPTOR": _DATASET, "__module__": "google.cloud.automl_v1beta1.proto.dataset_pb2", - "__doc__": """A workspace for solving a single, particular machine - learning (ML) problem. A workspace contains examples that may be - annotated. - - + "__doc__": """A workspace for solving a single, particular machine learning (ML) + problem. A workspace contains examples that may be annotated. Attributes: dataset_metadata: Required. The dataset metadata that is specific to the problem diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2.py b/google/cloud/automl_v1beta1/proto/detection_pb2.py index 4ea89ff9..e64aced2 100644 --- a/google/cloud/automl_v1beta1/proto/detection_pb2.py +++ b/google/cloud/automl_v1beta1/proto/detection_pb2.py @@ -564,8 +564,6 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Annotation details for image object detection. - - Attributes: bounding_box: Output only. The rectangle representing the object location. @@ -586,8 +584,6 @@ "DESCRIPTOR": _VIDEOOBJECTTRACKINGANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Annotation details for video object tracking. - - Attributes: instance_id: Optional. 
The instance of the object, expressed as a positive @@ -627,8 +623,6 @@ "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Metrics for a single confidence threshold. - - Attributes: confidence_threshold: Output only. The confidence threshold value used to compute @@ -645,11 +639,8 @@ ), "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Bounding box matching model metrics for a single - intersection-over-union threshold and multiple label match confidence - thresholds. - - + "__doc__": """Bounding box matching model metrics for a single intersection-over- + union threshold and multiple label match confidence thresholds. Attributes: iou_threshold: Output only. The intersection-over-union threshold value used @@ -674,10 +665,8 @@ { "DESCRIPTOR": _IMAGEOBJECTDETECTIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Model evaluation metrics for image object detection - problems. Evaluates prediction quality of labeled bounding boxes. - - + "__doc__": """Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. Attributes: evaluated_bounding_box_count: Output only. The total number of bounding boxes (i.e. summed @@ -704,12 +693,9 @@ { "DESCRIPTOR": _VIDEOOBJECTTRACKINGEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", - "__doc__": """Model evaluation metrics for video object tracking - problems. Evaluates prediction quality of both labeled bounding boxes - and labeled tracks (i.e. series of bounding boxes sharing same label and - instance ID). - - + "__doc__": """Model evaluation metrics for video object tracking problems. Evaluates + prediction quality of both labeled bounding boxes and labeled tracks + (i.e. 
series of bounding boxes sharing same label and instance ID). Attributes: evaluated_frame_count: Output only. The number of video frames used to create this diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2.py b/google/cloud/automl_v1beta1/proto/geometry_pb2.py index 8027aa04..e4164610 100644 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1beta1/proto/geometry_pb2.py @@ -132,8 +132,6 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", "__doc__": """Required. Horizontal coordinate. - - Attributes: y: Required. Vertical coordinate. @@ -149,11 +147,9 @@ { "DESCRIPTOR": _BOUNDINGPOLY, "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", - "__doc__": """A bounding polygon of a detected object on a plane. On - output both vertices and normalized_vertices are provided. The polygon - is formed by connecting vertices in the order they are listed. - - + "__doc__": """A bounding polygon of a detected object on a plane. On output both + vertices and normalized_vertices are provided. The polygon is formed + by connecting vertices in the order they are listed. Attributes: normalized_vertices: Output only . The bounding polygon normalized vertices. diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py index 62672732..24551f30 100644 --- a/google/cloud/automl_v1beta1/proto/image_pb2.py +++ b/google/cloud/automl_v1beta1/proto/image_pb2.py @@ -483,8 +483,6 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Dataset metadata that is specific to image classification. - - Attributes: classification_type: Required. Type of the classification problem. @@ -500,9 +498,7 @@ { "DESCRIPTOR": _IMAGEOBJECTDETECTIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Dataset metadata specific to image object detection. 
- - """, + "__doc__": """Dataset metadata specific to image object detection.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadata) }, ) @@ -515,8 +511,6 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Model metadata for image classification. - - Attributes: base_model_id: Optional. The ID of the ``base`` model. If it is specified, @@ -593,8 +587,6 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Model metadata specific to image object detection. - - Attributes: model_type: Optional. Type of the model. The available values are: \* @@ -666,10 +658,7 @@ { "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Model deployment metadata specific to Image - Classification. - - + "__doc__": """Model deployment metadata specific to Image Classification. Attributes: node_count: Input only. The number of nodes to deploy the model on. A node @@ -689,10 +678,7 @@ { "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", - "__doc__": """Model deployment metadata specific to Image Object - Detection. - - + "__doc__": """Model deployment metadata specific to Image Object Detection. Attributes: node_count: Input only. The number of nodes to deploy the model on. A node diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py index 44ea8ca2..cc4c8305 100644 --- a/google/cloud/automl_v1beta1/proto/io_pb2.py +++ b/google/cloud/automl_v1beta1/proto/io_pb2.py @@ -923,264 +923,241 @@ ), "DESCRIPTOR": _INPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Input configuration for ImportData Action. 
- - The format of input depends on dataset_metadata the Dataset into which - the import is happening has. As input source the + "__doc__": """Input configuration for ImportData Action. The format of input + depends on dataset_metadata the Dataset into which the import is + happening has. As input source the [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is expected, unless specified otherwise. Additionally any input .CSV file by itself must be 100MB or smaller, unless specified otherwise. If an - “example” file (that is, image, video etc.) with identical content (even - if it had different GCS_FILE_PATH) is mentioned multiple times, then its - label, bounding boxes etc. are appended. The same file should be always - provided with the same ML_USE and GCS_FILE_PATH, if it is not, then - these values are nondeterministically selected from the given ones. - - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: - - - For Image Classification: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH,LABEL,LABEL,… GCS_FILE_PATH leads to image of up - to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, - .BMP, .TIFF, .ICO For MULTICLASS classification type, at most one - LABEL is allowed per image. If an image has not yet been labeled, - then it should be mentioned just once with no LABEL. Some sample - rows: TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - - - For Image Object Detection: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) GCS_FILE_PATH - leads to image of up to 30MB in size. Supported extensions: .JPEG, - .GIF, .PNG. Each image is assumed to be exhaustively labeled. 
The - minimum allowed BOUNDING_BOX edge length is 0.01, and no more than - 500 BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is - defined per line). If an image has not yet been labeled, then it - should be mentioned just once with no LABEL and the “,,,,,,,” in - place of the BOUNDING_BOX. For images which are known to not contain - any bounding boxes, they should be labelled explictly as - “NEGATIVE_IMAGE”, followed by “,,,,,,,” in place of the BOUNDING_BOX. - Sample rows: TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - - - For Video Classification: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. - The GCS_FILE_PATH should lead to another .csv file which describes - examples that have given ML_USE, using the following row format: - GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) Here - GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length of - the video, and end has to be after the start. Any segment of a video - which has one or more labels on it, is considered a hard negative for - all other labels. Any segment with no labels on it is considered to - be unknown. If a whole video is unknown, then it shuold be mentioned - just once with “,,” in place of LABEL, - TIME_SEGMENT_START,TIME_SEGMENT_END. 
Sample top level CSV file: - TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for - a particular ML_USE: gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - - - For Video Object Tracking: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. - The GCS_FILE_PATH should lead to another .csv file which describes - examples that have given ML_USE, using one of the following row - format: GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or - GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up to - 50GB in size and up to 3h duration. Supported extensions: .MOV, - .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to obtain a - better model. When a specific labeled entity leaves the video frame, - and shows up afterwards it is not required, albeit preferable, that - the same INSTANCE_ID is given to it. TIMESTAMP must be within the - length of the video, the BOUNDING_BOX is assumed to be drawn on the - closest video’s frame to the TIMESTAMP. Any mentioned by the - TIMESTAMP frame is expected to be exhaustively labeled and no more - than 500 BOUNDING_BOX-es per frame are allowed. If a whole video is - unknown, then it should be mentioned just once with “,,,,,,,,,,” in - place of LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. 
Sample top - level CSV file: TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV - file for a particular ML_USE: - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,, - - For Text Extraction: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .JSONL (that is, JSON - Lines) file which either imports text in-line or as documents. Any - given .JSONL file must be 100MB or smaller. The in-line .JSONL file - contains, per line, a proto that wraps a TextSnippet proto (in json - representation) followed by one or more AnnotationPayload protos - (called annotations), which have display_name and text_extraction - detail populated. The given text is expected to be annotated - exhaustively, for example, if you look for animals and text contains - “dolphin” that is not labeled, then “dolphin” is assumed to not be an - animal. Any given text snippet content must be 10KB or smaller, and - also be UTF-8 NFC encoded (ASCII already is). The document .JSONL - file contains, per line, a proto that wraps a Document proto. The - Document proto must have either document_text or input_config set. In - document_text case, the Document proto may also contain the spatial - information of the document, including layout, document dimension and - page number. In input_config case, only PDF documents are supported - now, and each document may be up to 2MB large. Currently, annotations - on documents cannot be specified at import. 
Three sample CSV rows: - TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl - TEST,gs://folder/file3.jsonl Sample in-line JSON Lines file for - entity extraction (presented here with artificial line breaks, but - the only actual line break is denoted by :raw-latex:`\n`).: { - “document”: { “document_text”: {“content”: “dog cat”} “layout”: [ { - “text_segment”: { “start_offset”: 0, “end_offset”: 3, }, - “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: - 0.1, “y”: 0.1}, {“x”: 0.1, “y”: 0.3}, {“x”: 0.3, “y”: 0.3}, {“x”: - 0.3, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }, { - “text_segment”: { “start_offset”: 4, “end_offset”: 7, }, - “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: - 0.4, “y”: 0.1}, {“x”: 0.4, “y”: 0.3}, {“x”: 0.8, “y”: 0.3}, {“x”: - 0.8, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }], - “document_dimensions”: { “width”: 8.27, “height”: 11.69, “unit”: - INCH, } “page_count”: 1, }, “annotations”: [ { “display_name”: - “animal”, “text_extraction”: {“text_segment”: {“start_offset”: 0, - “end_offset”: 3}} }, { “display_name”: “animal”, “text_extraction”: - {“text_segment”: {“start_offset”: 4, “end_offset”: 7}} } ], - }:raw-latex:`\n - { - "text_snippet": { - "content": "This dog is good." - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 8} - } - } - ] - }` Sample document JSON Lines file (presented here with - artificial line breaks, but the only actual line break is denoted by - :raw-latex:`\n`).: { “document”: { “input_config”: { “gcs_source”: { - “input_uris”: [ “gs://folder/document1.pdf” ] } } } }:raw-latex:`\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] - } - } - } - }` - - - For Text Classification: CSV file(s) with each line in format: - ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,… TEXT_SNIPPET and - GCS_FILE_PATH are distinguished by a pattern. 
If the column content - is a valid gcs file path, i.e. prefixed by “gs://”, it will be - treated as a GCS_FILE_PATH, else if the content is enclosed within - double quotes ("“), it is treated as a TEXT_SNIPPET. In the - GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 - encoding, for example,”gs://folder/content.txt“, and the content in - it is extracted as a text snippet. In TEXT_SNIPPET case, the column - content excluding quotes is treated as to be imported text snippet. - In both cases, the text snippet/file size must be within 128kB. - Maximum 100 unique labels are allowed per CSV row. Sample rows: - TRAIN,”They have bad food and very rude“,RudeService,BadFood - TRAIN,gs://folder/content.txt,SlowService TEST,”Typically always bad - service there.“,RudeService VALIDATE,”Stomach ache to go.",BadFood - - - For Text Sentiment: CSV file(s) with each line in format: - ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and - GCS_FILE_PATH are distinguished by a pattern. If the column content - is a valid gcs file path, that is, prefixed by “gs://”, it is treated - as a GCS_FILE_PATH, otherwise it is treated as a TEXT_SNIPPET. In the - GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 - encoding, for example, “gs://folder/content.txt”, and the content in - it is extracted as a text snippet. In TEXT_SNIPPET case, the column - content itself is treated as to be imported text snippet. In both - cases, the text snippet must be up to 500 characters long. Sample - rows: TRAIN,“@freewrytin this is way too good for your product”,2 - TRAIN,“I need this product so bad”,3 TEST,“Thank you for this - product.”,4 VALIDATE,gs://folder/content.txt,2 - - - For Tables: Either - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or - - [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] - can be used. 
All inputs is concatenated into a single - - [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] - For gcs_source: CSV file(s), where the first row of the first file is - the header, containing unique column names. If the first row of a - subsequent file is the same as the header, then it is also treated as a - header. All other rows contain values for the corresponding columns. - Each .CSV file by itself must be 10GB or smaller, and their total size - must be 100GB or smaller. First three sample rows of a CSV file: - “Id”,“First Name”,“Last Name”,“Dob”,“Addresses” - - “1”,“John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]” - - “2”,“Jane”,“Doe”,“1980-10-16”,“[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} - For bigquery_source: An URI of a BigQuery table. The user data size of - the BigQuery table must be 100GB or smaller. An imported table must have - between 2 and 1,000 columns, inclusive, and between 1000 and 100,000,000 - rows, inclusive. There are at most 5 import data running in parallel. - Definitions: ML_USE =”TRAIN" \| “VALIDATE” \| “TEST” \| “UNASSIGNED” - Describes how the given example (file) should be used for model - training. “UNASSIGNED” can be used when user has no preference. + “example” file (that is, image, video etc.) with identical content + (even if it had different GCS_FILE_PATH) is mentioned multiple times, + then its label, bounding boxes etc. are appended. The same file should + be always provided with the same ML_USE and GCS_FILE_PATH, if it is + not, then these values are nondeterministically selected from the + given ones. 
The formats are represented in EBNF with commas being + literal and with non-terminal symbols defined near the end of this + comment. The formats are: - For Image Classification: CSV file(s) + with each line in format: ML_USE,GCS_FILE_PATH,LABEL,LABEL,… + GCS_FILE_PATH leads to image of up to 30MB in size. Supported + extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO For + MULTICLASS classification type, at most one LABEL is allowed per + image. If an image has not yet been labeled, then it should be + mentioned just once with no LABEL. Some sample rows: + TRAIN,gs://folder/image1.jpg,daisy + TEST,gs://folder/image2.jpg,dandelion,tulip,rose + UNASSIGNED,gs://folder/image3.jpg,daisy + UNASSIGNED,gs://folder/image4.jpg - For Image Object Detection: CSV + file(s) with each line in format: + ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) GCS_FILE_PATH + leads to image of up to 30MB in size. Supported extensions: .JPEG, + .GIF, .PNG. Each image is assumed to be exhaustively labeled. The + minimum allowed BOUNDING_BOX edge length is 0.01, and no more than + 500 BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is + defined per line). If an image has not yet been labeled, then it + should be mentioned just once with no LABEL and the “,,,,,,,” in + place of the BOUNDING_BOX. For images which are known to not contain + any bounding boxes, they should be labelled explictly as + “NEGATIVE_IMAGE”, followed by “,,,,,,,” in place of the BOUNDING_BOX. + Sample rows: TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, + UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 + TEST,gs://folder/im3.png,,,,,,,,, + TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - For Video + Classification: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. 
+ The GCS_FILE_PATH should lead to another .csv file which describes + examples that have given ML_USE, using the following row format: + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) Here + GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h + duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length of + the video, and end has to be after the start. Any segment of a video + which has one or more labels on it, is considered a hard negative for + all other labels. Any segment with no labels on it is considered to + be unknown. If a whole video is unknown, then it shuold be mentioned + just once with “,,” in place of LABEL, + TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file: + TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for + a particular ML_USE: gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - For Video + Object Tracking: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used. + The GCS_FILE_PATH should lead to another .csv file which describes + examples that have given ML_USE, using one of the following row + format: GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or + GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up to + 50GB in size and up to 3h duration. Supported extensions: .MOV, + .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to obtain a + better model. When a specific labeled entity leaves the video frame, + and shows up afterwards it is not required, albeit preferable, that + the same INSTANCE_ID is given to it. TIMESTAMP must be within the + length of the video, the BOUNDING_BOX is assumed to be drawn on the + closest video’s frame to the TIMESTAMP. 
Any mentioned by the + TIMESTAMP frame is expected to be exhaustively labeled and no more + than 500 BOUNDING_BOX-es per frame are allowed. If a whole video is + unknown, then it should be mentioned just once with “,,,,,,,,,,” in + place of LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. Sample top + level CSV file: TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV + file for a particular ML_USE: + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, - For Text Extraction: CSV file(s) + with each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads + to a .JSONL (that is, JSON Lines) file which either imports text + in-line or as documents. Any given .JSONL file must be 100MB or + smaller. The in-line .JSONL file contains, per line, a proto that + wraps a TextSnippet proto (in json representation) followed by one + or more AnnotationPayload protos (called annotations), which have + display_name and text_extraction detail populated. The given text + is expected to be annotated exhaustively, for example, if you look + for animals and text contains “dolphin” that is not labeled, then + “dolphin” is assumed to not be an animal. Any given text snippet + content must be 10KB or smaller, and also be UTF-8 NFC encoded + (ASCII already is). The document .JSONL file contains, per line, a + proto that wraps a Document proto. The Document proto must have + either document_text or input_config set. In document_text case, + the Document proto may also contain the spatial information of the + document, including layout, document dimension and page number. 
In + input_config case, only PDF documents are supported now, and each + document may be up to 2MB large. Currently, annotations on + documents cannot be specified at import. Three sample CSV rows: + TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl + TEST,gs://folder/file3.jsonl Sample in-line JSON Lines file for + entity extraction (presented here with artificial line breaks, but + the only actual line break is denoted by :raw-latex:`\n`).: { + “document”: { “document_text”: {“content”: “dog cat”} “layout”: [ { + “text_segment”: { “start_offset”: 0, “end_offset”: 3, }, + “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: + 0.1, “y”: 0.1}, {“x”: 0.1, “y”: 0.3}, {“x”: 0.3, “y”: 0.3}, {“x”: + 0.3, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }, { + “text_segment”: { “start_offset”: 4, “end_offset”: 7, }, + “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: + 0.4, “y”: 0.1}, {“x”: 0.4, “y”: 0.3}, {“x”: 0.8, “y”: 0.3}, {“x”: + 0.8, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }], + “document_dimensions”: { “width”: 8.27, “height”: 11.69, “unit”: + INCH, } “page_count”: 1, }, “annotations”: [ { “display_name”: + “animal”, “text_extraction”: {“text_segment”: {“start_offset”: 0, + “end_offset”: 3}} }, { “display_name”: “animal”, “text_extraction”: + {“text_segment”: {“start_offset”: 4, “end_offset”: 7}} } ], }:raw- + latex:`\n { "text_snippet": { + "content": "This dog is good." 
}, + "annotations": [ { "display_name": + "animal", "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 8} } + } ] }` Sample document JSON Lines file (presented + here with artificial line breaks, but the only actual line break is + denoted by :raw-latex:`\n`).: { “document”: { “input_config”: { + “gcs_source”: { “input_uris”: [ “gs://folder/document1.pdf” ] } } } + }:raw-latex:`\n { "document": { + "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document2.pdf" ] } } + } }` - For Text Classification: CSV file(s) with each line + in format: ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,… + TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If + the column content is a valid gcs file path, i.e. prefixed by + “gs://”, it will be treated as a GCS_FILE_PATH, else if the content + is enclosed within double quotes ("“), it is treated as a + TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a + .txt file with UTF-8 encoding, for + example,”gs://folder/content.txt“, and the content in it is + extracted as a text snippet. In TEXT_SNIPPET case, the column + content excluding quotes is treated as to be imported text snippet. + In both cases, the text snippet/file size must be within 128kB. + Maximum 100 unique labels are allowed per CSV row. Sample rows: + TRAIN,”They have bad food and very rude“,RudeService,BadFood + TRAIN,gs://folder/content.txt,SlowService TEST,”Typically always bad + service there.“,RudeService VALIDATE,”Stomach ache to go.",BadFood - + For Text Sentiment: CSV file(s) with each line in format: + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and + GCS_FILE_PATH are distinguished by a pattern. If the column content + is a valid gcs file path, that is, prefixed by “gs://”, it is treated + as a GCS_FILE_PATH, otherwise it is treated as a TEXT_SNIPPET. 
In the + GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 + encoding, for example, “gs://folder/content.txt”, and the content in + it is extracted as a text snippet. In TEXT_SNIPPET case, the column + content itself is treated as to be imported text snippet. In both + cases, the text snippet must be up to 500 characters long. Sample + rows: TRAIN,“@freewrytin this is way too good for your product”,2 + TRAIN,“I need this product so bad”,3 TEST,“Thank you for this + product.”,4 VALIDATE,gs://folder/content.txt,2 - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or [ + bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour + ce] can be used. All inputs is concatenated into a single [primary_ta + ble][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_n + ame] For gcs_source: CSV file(s), where the first row of the first + file is the header, containing unique column names. If the first row + of a subsequent file is the same as the header, then it is also + treated as a header. All other rows contain values for the + corresponding columns. Each .CSV file by itself must be 10GB or + smaller, and their total size must be 100GB or smaller. First three + sample rows of a CSV file: “Id”,“First Name”,“Last + Name”,“Dob”,“Addresses” “1”,“John”,“Doe”,“1968-01-22”,“[{"status":"cu + rrent","address":"123_First_Avenue","city":"Seattle","state":"WA","zip + ":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Mai + n_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears" + :"5"}]” “2”,“Jane”,“Doe”,“1980-10-16”,“[{"status":"current","address" + :"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOf + Years":"2"},{"status":"previous","address":"321_Main_Street","city":"H + oboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} For + bigquery_source: An URI of a BigQuery table. The user data size of the + BigQuery table must be 100GB or smaller. 
An imported table must have + between 2 and 1,000 columns, inclusive, and between 1000 and + 100,000,000 rows, inclusive. There are at most 5 import data running + in parallel. Definitions: ML_USE =”TRAIN" \| “VALIDATE” \| “TEST” \| + “UNASSIGNED” Describes how the given example (file) should be used for + model training. “UNASSIGNED” can be used when user has no preference. GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/image1.png”. - LABEL = A display name of an object on an image, video etc., e.g. “dog”. - Must be up to 32 characters long and can consist only of ASCII Latin - letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For each - label an AnnotationSpec is created which display_name becomes the label; - AnnotationSpecs are given back in predictions. INSTANCE_ID = A positive - integer that identifies a specific instance of a labeled entity on an - example. Used e.g. to track two cars on a video while being able to tell - apart which one is which. BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX \| - VERTEX,,,VERTEX,, A rectangle parallel to the frame of the example - (image, video). If 4 vertices are given they are connected by edges in - the order provided, if 2 are given they are recognized as diagonally - opposite vertices of the rectangle. VERTEX = COORDINATE,COORDINATE First - coordinate is horizontal (x), the second is vertical (y). COORDINATE = A - float in 0 to 1 range, relative to total length of image or video in - given dimension. For fractions the leading non-decimal 0 can be omitted - (i.e. 0.3 = .3). Point 0,0 is in top left. TIME_SEGMENT_START = - TIME_OFFSET Expresses a beginning, inclusive, of a time segment within - an example that has a time dimension (e.g. video). TIME_SEGMENT_END = - TIME_OFFSET Expresses an end, exclusive, of a time segment within an - example that has a time dimension (e.g. video). TIME_OFFSET = A number - of seconds as measured from the start of an example (e.g. video). 
- Fractions are allowed, up to a microsecond precision. “inf” is allowed, - and it means the end of the example. TEXT_SNIPPET = A content of a text - snippet, UTF-8 encoded, enclosed within double quotes ("“). SENTIMENT = - An integer between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). - Describes the ordinal of the sentiment - higher value means a more - positive sentiment. All the values are completely relative, i.e. neither - 0 needs to mean a negative or neutral sentiment nor sentiment_max needs - to mean a positive one - it is just required that 0 is the least - positive sentiment in the data, and sentiment_max is the most positive - one. The SENTIMENT shouldn’t be confused with”score" or “magnitude” from - the previous Natural Language Sentiment Analysis API. All SENTIMENT - values between 0 and sentiment_max must be represented in the imported - data. On prediction the same 0 to sentiment_max range will be used. The - difference between neighboring sentiment values needs not to be uniform, - e.g. 1 and 2 may be similar whereas the difference between 2 and 3 may - be huge. - - Errors: If any of the provided CSV files can’t be parsed or if more than - certain percent of CSV rows cannot be processed then the operation fails - and nothing is imported. Regardless of overall success or failure the - per-row failures, up to a certain count cap, is listed in - Operation.metadata.partial_failures. - - + LABEL = A display name of an object on an image, video etc., e.g. + “dog”. Must be up to 32 characters long and can consist only of ASCII + Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For + each label an AnnotationSpec is created which display_name becomes the + label; AnnotationSpecs are given back in predictions. INSTANCE_ID = A + positive integer that identifies a specific instance of a labeled + entity on an example. Used e.g. to track two cars on a video while + being able to tell apart which one is which. 
BOUNDING_BOX = + VERTEX,VERTEX,VERTEX,VERTEX \| VERTEX,,,VERTEX,, A rectangle parallel + to the frame of the example (image, video). If 4 vertices are given + they are connected by edges in the order provided, if 2 are given they + are recognized as diagonally opposite vertices of the rectangle. + VERTEX = COORDINATE,COORDINATE First coordinate is horizontal (x), the + second is vertical (y). COORDINATE = A float in 0 to 1 range, relative + to total length of image or video in given dimension. For fractions + the leading non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is + in top left. TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses an + end, exclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_OFFSET = A number of seconds as measured + from the start of an example (e.g. video). Fractions are allowed, up + to a microsecond precision. “inf” is allowed, and it means the end of + the example. TEXT_SNIPPET = A content of a text snippet, UTF-8 + encoded, enclosed within double quotes ("“). SENTIMENT = An integer + between 0 and Dataset.text_sentiment_dataset_metadata.sentiment_max + (inclusive). Describes the ordinal of the sentiment - higher value + means a more positive sentiment. All the values are completely + relative, i.e. neither 0 needs to mean a negative or neutral sentiment + nor sentiment_max needs to mean a positive one - it is just required + that 0 is the least positive sentiment in the data, and sentiment_max + is the most positive one. The SENTIMENT shouldn’t be confused + with”score" or “magnitude” from the previous Natural Language + Sentiment Analysis API. All SENTIMENT values between 0 and + sentiment_max must be represented in the imported data. On prediction + the same 0 to sentiment_max range will be used. 
The difference between + neighboring sentiment values needs not to be uniform, e.g. 1 and 2 may + be similar whereas the difference between 2 and 3 may be huge. + Errors: If any of the provided CSV files can’t be parsed or if more + than certain percent of CSV rows cannot be processed then the + operation fails and nothing is imported. Regardless of overall success + or failure the per-row failures, up to a certain count cap, is listed + in Operation.metadata.partial_failures. Attributes: source: The source of the input. @@ -1211,138 +1188,131 @@ { "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Input configuration for BatchPredict Action. - - The format of input depends on the ML problem of the model used for - prediction. As input source the + "__doc__": """Input configuration for BatchPredict Action. The format of input + depends on the ML problem of the model used for prediction. As input + source the [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is - expected, unless specified otherwise. - - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: - - - For Image Classification: CSV file(s) with each line having just a - single column: GCS_FILE_PATH which leads to image of up to 30MB in - size. Supported extensions: .JPEG, .GIF, .PNG. This path is treated - as the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - - - For Image Object Detection: CSV file(s) with each line having just a - single column: GCS_FILE_PATH which leads to image of up to 30MB in - size. Supported extensions: .JPEG, .GIF, .PNG. This path is treated - as the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png + expected, unless specified otherwise. 
The formats are represented in + EBNF with commas being literal and with non-terminal symbols defined + near the end of this comment. The formats are: - For Image + Classification: CSV file(s) with each line having just a single + column: GCS_FILE_PATH which leads to image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as + the ID in the Batch predict output. Three sample rows: + gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png + - For Image Object Detection: CSV file(s) with each line having just + a single column: GCS_FILE_PATH which leads to image of up to 30MB + in size. Supported extensions: .JPEG, .GIF, .PNG. This path is + treated as the ID in the Batch predict output. Three sample rows: + gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - For Video Classification: CSV file(s) with each line in format: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads - to video of up to 50GB in size and up to 3h duration. Supported - extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and - TIME_SEGMENT_END must be within the length of the video, and end has - to be after the start. Three sample rows: - gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - - For Video Object Tracking: CSV file(s) with each line in format: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads - to video of up to 50GB in size and up to 3h duration. Supported - extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and - TIME_SEGMENT_END must be within the length of the video, and end has - to be after the start. Three sample rows: - gs://folder/video1.mp4,10,240 gs://folder/video1.mp4,300,360 - gs://folder/vid2.mov,0,inf - - For Text Classification: CSV file(s) with each line having just a - single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file can - have size upto 128kB. 
Any given text snippet content must have 60,000 - characters or less. Three sample rows: gs://folder/text1.txt “Some - text content to predict” gs://folder/text3.pdf Supported file - extensions: .txt, .pdf - - - For Text Sentiment: CSV file(s) with each line having just a single - column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file can have - size upto 128kB. Any given text snippet content must have 500 - characters or less. Three sample rows: gs://folder/text1.txt “Some - text content to predict” gs://folder/text3.pdf Supported file - extensions: .txt, .pdf - - - For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either - provide text in-line or as documents (for a single BatchPredict call - only one of the these formats may be used). The in-line .JSONL - file(s) contain per line a proto that wraps a temporary user-assigned - TextSnippet ID (string up to 2000 characters long) called “id”, a - TextSnippet proto (in json representation) and zero or more - TextFeature protos. Any given text snippet content must have 30,000 - characters or less, and also be UTF-8 NFC encoded (ASCII already is). - The IDs provided should be unique. The document .JSONL file(s) - contain, per line, a proto that wraps a Document proto with - input_config set. Only PDF documents are supported now, and each - document must be up to 2MB large. Any given .JSONL file must be 100MB - or smaller, and no more than 20 files may be given. - - - For Tables: Either - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or - - [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. - GCS case: CSV file(s), each by itself 10GB or smaller and total size - must be 100GB or smaller, where first file must have a header containing - column names. If the first row of a subsequent file is the same as the - header, then it is also treated as a header. All other rows contain - values for the corresponding columns. 
The column names must contain the - model’s - - [input_feature_column_specs’][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads + to video of up to 50GB in size and up to 3h duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and + TIME_SEGMENT_END must be within the length of the video, and end has + to be after the start. Three sample rows: + gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf - For Video Object Tracking: CSV file(s) + with each line in format: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads + to video of up to 50GB in size and up to 3h duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and + TIME_SEGMENT_END must be within the length of the video, and end has + to be after the start. Three sample rows: + gs://folder/video1.mp4,10,240 gs://folder/video1.mp4,300,360 + gs://folder/vid2.mov,0,inf - For Text Classification: CSV file(s) + with each line having just a single column: GCS_FILE_PATH \| + TEXT_SNIPPET Any given text file can have size upto 128kB. Any + given text snippet content must have 60,000 characters or less. + Three sample rows: gs://folder/text1.txt “Some text content to + predict” gs://folder/text3.pdf Supported file extensions: .txt, + .pdf - For Text Sentiment: CSV file(s) with each line having just a + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file + can have size upto 128kB. Any given text snippet content must have + 500 characters or less. Three sample rows: gs://folder/text1.txt + “Some text content to predict” gs://folder/text3.pdf Supported file + extensions: .txt, .pdf - For Text Extraction .JSONL (i.e. JSON + Lines) file(s) which either provide text in-line or as documents + (for a single BatchPredict call only one of the these formats may + be used). 
The in-line .JSONL file(s) contain per line a proto that + wraps a temporary user-assigned TextSnippet ID (string up to 2000 + characters long) called “id”, a TextSnippet proto (in json + representation) and zero or more TextFeature protos. Any given text + snippet content must have 30,000 characters or less, and also be + UTF-8 NFC encoded (ASCII already is). The IDs provided should be + unique. The document .JSONL file(s) contain, per line, a proto that + wraps a Document proto with input_config set. Only PDF documents + are supported now, and each document must be up to 2MB large. Any + given .JSONL file must be 100MB or smaller, and no more than 20 + files may be given. Sample in-line JSON Lines file (presented here + with artificial line breaks, but the only actual line break is + denoted by :raw-latex:`\n`): { “id”: “my_first_id”, “text_snippet”: + { “content”: “dog car cat”}, “text_features”: [ { “text_segment”: + {“start_offset”: 4, “end_offset”: 6}, “structural_type”: PARAGRAPH, + “bounding_poly”: { “normalized_vertices”: [ {“x”: 0.1, “y”: 0.1}, + {“x”: 0.1, “y”: 0.3}, {“x”: 0.3, “y”: 0.3}, {“x”: 0.3, “y”: 0.1}, ] + }, } ], }:raw-latex:`\n { "id": "2", + "text_snippet": { "content": "An elaborate content", + "mime_type": "text/plain" } }` Sample document + JSON Lines file (presented here with artificial line breaks, but + the only actual line break is denoted by :raw-latex:`\n`).: { + “document”: { “input_config”: { “gcs_source”: { “input_uris”: [ + “gs://folder/document1.pdf” ] } } } }:raw-latex:`\n { + "document": { "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] + } } } }` - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or [ + bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour + ce]. GCS case: CSV file(s), each by itself 10GB or smaller and total + size must be 100GB or smaller, where first file must have a header + containing column names. 
If the first row of a subsequent file is the + same as the header, then it is also treated as a header. All other + rows contain values for the corresponding columns. The column names + must contain the model’s [input_feature_column_specs’][google.cloud.a + utoml.v1beta1.TablesModelMetadata.input_feature_column_specs] [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] (order doesn’t matter). The columns corresponding to the model’s input feature column specs must contain values compatible with the column - spec’s data types. Prediction on all the rows, i.e. the CSV lines, will - be attempted. For FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - all columns having - - [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. First three sample rows of a CSV file: “First - Name”,“Last Name”,“Dob”,“Addresses” - - “John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]” - - “Jane”,“Doe”,“1980-10-16”,"[{“status”:“current”,“address”:“789_Any_Avenue”,“city”:“Albany”,“state”:“NY”,“zip”:“33333”,“numberOfYears”:“2”},{“status”:“previous”,“address”:“321_Main_Street”,“city”:“Hoboken”,“state”:“NJ”,“zip”:“44444”,“numberOfYears”:“3”}]} - BigQuery case: An URI of a BigQuery table. The user data size of the - BigQuery table must be 100GB or smaller. The column names must contain - the model’s - - [input_feature_column_specs’][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - + spec’s data types. Prediction on all the rows, i.e. the CSV lines, + will be attempted. 
For FORECASTING [prediction_type][google.cloud.aut + oml.v1beta1.TablesModelMetadata.prediction_type]: all columns having + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSp + ec.ForecastingMetadata.ColumnType] type will be ignored. First three + sample rows of a CSV file: “First Name”,“Last Name”,“Dob”,“Addresses” + “John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_A + venue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1" + },{"status":"previous","address":"456_Main_Street","city":"Portland"," + state":"OR","zip":"22222","numberOfYears":"5"}]” “Jane”,“Doe”,“1980-1 + 0-16”,"[{“status”:“current”,“address”:“789_Any_Avenue”,“city”:“Albany” + ,“state”:“NY”,“zip”:“33333”,“numberOfYears”:“2”},{“status”:“previous”, + “address”:“321_Main_Street”,“city”:“Hoboken”,“state”:“NJ”,“zip”:“44444 + ”,“numberOfYears”:“3”}]} BigQuery case: An URI of a BigQuery table. + The user data size of the BigQuery table must be 100GB or smaller. The + column names must contain the model’s [input_feature_column_specs’][g + oogle.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_sp + ecs] [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] (order doesn’t matter). The columns corresponding to the model’s input feature column specs must contain values compatible with the column spec’s data types. Prediction on all the rows of the table will be - attempted. For FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - all columns having - - [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. - - Definitions: GCS_FILE_PATH = A path to file on GCS, e.g. - “gs://folder/video.avi”. 
TEXT_SNIPPET = A content of a text snippet, - UTF-8 encoded, enclosed within double quotes ("“) TIME_SEGMENT_START = - TIME_OFFSET Expresses a beginning, inclusive, of a time segment within - an example that has a time dimension (e.g. video). TIME_SEGMENT_END = - TIME_OFFSET Expresses an end, exclusive, of a time segment within an - example that has a time dimension (e.g. video). TIME_OFFSET = A number - of seconds as measured from the start of an example (e.g. video). - Fractions are allowed, up to a microsecond precision.”inf" is allowed - and it means the end of the example. - - Errors: If any of the provided CSV files can’t be parsed or if more than - certain percent of CSV rows cannot be processed then the operation fails - and prediction does not happen. Regardless of overall success or failure - the per-row failures, up to a certain count cap, will be listed in - Operation.metadata.partial_failures. - - + attempted. For FORECASTING [prediction_type][google.cloud.automl.v1be + ta1.TablesModelMetadata.prediction_type]: all columns having [TIME_SE + RIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.Forec + astingMetadata.ColumnType] type will be ignored. Definitions: + GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/video.avi”. + TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed + within double quotes ("“) TIME_SEGMENT_START = TIME_OFFSET Expresses a + beginning, inclusive, of a time segment within an example that has a + time dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses + an end, exclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_OFFSET = A number of seconds as measured + from the start of an example (e.g. video). Fractions are allowed, up + to a microsecond precision.”inf" is allowed and it means the end of + the example. 
Errors: If any of the provided CSV files can’t be parsed + or if more than certain percent of CSV rows cannot be processed then + the operation fails and prediction does not happen. Regardless of + overall success or failure the per-row failures, up to a certain count + cap, will be listed in Operation.metadata.partial_failures. Attributes: source: Required. The source of the input. @@ -1364,8 +1334,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """Input configuration of a [Document][google.cloud.automl.v1beta1.Document]. - - Attributes: gcs_source: The Google Cloud Storage location of the document file. Only a @@ -1383,33 +1351,26 @@ { "DESCRIPTOR": _OUTPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """\* For Translation: CSV file ``translation.csv``, with - each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV - file which describes examples that have given ML_USE, using the - following row format per line: TEXT_SNIPPET (in source language) - \\tTEXT_SNIPPET (in target language) - - - For Tables: Output depends on whether the dataset was imported from - GCS or BigQuery. GCS case: - - [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] - must be set. Exported are CSV file(s) ``tables_1.csv``, + "__doc__": """\* For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which + describes examples that have given ML_USE, using the following row + format per line: TEXT_SNIPPET (in source language) :raw-latex:`\t + `TEXT_SNIPPET (in target language) - For Tables: Output depends on + whether the dataset was imported from GCS or BigQuery. GCS case: [ + gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destinat + ion] must be set. 
Exported are CSV file(s) ``tables_1.csv``, ``tables_2.csv``,…,\ ``tables_N.csv`` with each having as header line the table’s column names, and all other lines contain values for the - header columns. BigQuery case: - - [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - - ``export_data__`` - where will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in - YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In that dataset a - new table called ``primary_table`` will be created, and filled with - precisely the same data as this obtained on import. - - + header columns. BigQuery case: [bigquery_destination][google.cloud.au + toml.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery + project must be set. In the given project a new dataset will be + created with name ``export_data__`` where will be made BigQuery- + dataset-name compatible (e.g. most special characters will become + underscores), and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ “based + on ISO-8601” format. In that dataset a new table called + ``primary_table`` will be created, and filled with precisely the same + data as this obtained on import. Attributes: destination: Required. The destination of the output. @@ -1434,277 +1395,218 @@ { "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Output configuration for BatchPredict Action. - - As destination the - - [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] - must be set unless specified otherwise for a domain. If gcs_destination - is set then in the given directory a new directory is created. Its name - will be “prediction--”, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. 
The contents of it depends on the ML problem the - predictions are made for. - - - For Image Classification: In the created directory files - ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. A single image - will be listed only once with all its annotations, and its - annotations will never be split across files. Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - image’s “ID” : “” followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image failed - (partially or completely), then an additional ``errors_1.jsonl``, - ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same “ID” : “” but - here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. - - - For Image Object Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,…,\ ``image_object_detection_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. Each .JSONL file - will contain, per line, a JSON representation of a proto that wraps - image’s “ID” : “” followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - image_object_detection detail populated. A single image will be - listed only once with all its annotations, and its annotations will - never be split across files. 
If prediction for any image failed - (partially or completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same “ID” : “” but - here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. \* For Video - Classification: In the created directory a video_classification.csv - file, and a .JSON file per each video classification requested in the - input (i.e. each line in given CSV(s)), will be created. - - :: - - The format of video_classification.csv is: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to - 1 the prediction input lines (i.e. video_classification.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = “OK” - if prediction completed successfully, or an error code with message + "__doc__": """Output configuration for BatchPredict Action. As destination the [gc + s_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gc + s_destination] must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory is + created. Its name will be “prediction--”, where timestamp is in YYYY- + MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the + ML problem the predictions are made for. - For Image Classification: + In the created directory files ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted images and annotations. 
A single image + will be listed only once with all its annotations, and its + annotations will never be split across files. Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + image’s “ID” : “” followed by a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image failed + (partially or completely), then an additional ``errors_1.jsonl``, + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N + depends on total number of failed predictions). These files will have + a JSON representation of a proto that wraps the same “ID” : “” but + here followed by exactly one ```google.rpc.Status`` `__ + containing only ``code`` and ``message``\ fields. - For Image Object + Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,…,\ + ``image_object_detection_N.jsonl`` will be created, where N may be + 1, and depends on the total number of the successfully predicted + images and annotations. Each .JSONL file will contain, per line, a + JSON representation of a proto that wraps image’s “ID” : “” + followed by a list of zero or more AnnotationPayload protos (called + annotations), which have image_object_detection detail populated. A + single image will be listed only once with all its annotations, and + its annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + the same “ID” : “” but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``\ fields. 
\* For Video Classification: In the created + directory a video_classification.csv file, and a .JSON file per each + video classification requested in the input (i.e. each line in given + CSV(s)), will be created. :: The format of + video_classification.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SE + GMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_classification.csv has precisely + the same number of lines as the prediction input had.) JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for the video time segment the file is assigned to in the - video_classification.csv. All AnnotationPayload protos will have - video_classification field set, and will be sorted by - video_classification.type field (note that the returned types are - governed by `classifaction_types` parameter in - [PredictService.BatchPredictRequest.params][]). - - - For Video Object Tracking: In the created directory a - video_object_tracking.csv file will be created, and multiple files - video_object_trackinng_1.json, video_object_trackinng_2.json,…, - video_object_trackinng_N.json, where N is the number of requests in - the input (i.e. the number of lines in given CSV(s)). - - :: - - The format of video_object_tracking.csv is: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to - 1 the prediction input lines (i.e. 
video_object_tracking.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = “OK” - if prediction completed successfully, or an error code with message + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for the video time segment + the file is assigned to in the video_classification.csv. All + AnnotationPayload protos will have video_classification field + set, and will be sorted by video_classification.type field + (note that the returned types are governed by + `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). - For Video Object + Tracking: In the created directory a video_object_tracking.csv file + will be created, and multiple files video_object_trackinng_1.json, + video_object_trackinng_2.json,…, video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number of + lines in given CSV(s)). :: The format of + video_object_tracking.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_S + EGMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_object_tracking.csv has precisely + the same number of lines as the prediction input had.) JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message otherwise. If STATUS is not “OK” then the .JSON file for that line may - not exist or be empty. 
- - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for each frame of the video time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - - - For Text Classification: In the created directory files - ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. 
A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. The contents of these .JSONL file(s) depend on - whether the input used inline text, or documents. If input was - inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet’s - “id” (if specified), followed by input text snippet, and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_extraction detail populated. A single text snippet will be - listed only once with all its annotations, and its annotations will - never be split across files. If input used documents, then each - .JSONL file will contain, per line, a JSON representation of a proto - that wraps given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by a - list of zero or more AnnotationPayload protos (called annotations), - which have text_extraction detail populated and refer, via their - indices, to the OCR-ed text snippet. A single document (and its text - snippet) will be listed only once with all its annotations, and its - annotations will never be split across files. 
If prediction for any - text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - either the “id” : “” (in case of inline) or the document proto (in - case of document) but here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Tables: Output depends on whether - - [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] - or - - [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] - is set (either is allowed). GCS case: In the created directory files - ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` will be created, - where N may be 1, and depends on the total number of the successfully - predicted rows. For all CLASSIFICATION - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns’ - + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for each frame of the video + time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. - For Text Classification: In the + created directory files ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + inputs and annotations found. :: Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + input text snippet or input text file and a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. 
A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any text snippet or file failed (partially or completely), then + additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one ```google.rpc.Status`` `__ containing only + ``code`` and ``message``. - For Text Sentiment: In the created + directory files ``text_sentiment_1.jsonl``, + ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be + created, where N may be 1, and depends on the total number of inputs + and annotations found. :: Each .JSONL file will contain, + per line, a JSON representation of a proto that wraps input text + snippet or input text file and a list of zero or more + AnnotationPayload protos (called annotations), which have + text_sentiment detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any text snippet or file failed (partially or completely), then + additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one ```google.rpc.Status`` `__ containing only + ``code`` and ``message``. - For Text Extraction: In the created + directory files ``text_extraction_1.jsonl``, + ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be + created, where N may be 1, and depends on the total number of inputs + and annotations found. The contents of these .JSONL file(s) depend on + whether the input used inline text, or documents. 
If input was + inline, then each .JSONL file will contain, per line, a JSON + representation of a proto that wraps given in request text snippet’s + “id” (if specified), followed by input text snippet, and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_extraction detail populated. A single text snippet will be + listed only once with all its annotations, and its annotations will + never be split across files. If input used documents, then each + .JSONL file will contain, per line, a JSON representation of a proto + that wraps given in request document proto, followed by its OCR-ed + representation in the form of a text snippet, finally followed by a + list of zero or more AnnotationPayload protos (called annotations), + which have text_extraction detail populated and refer, via their + indices, to the OCR-ed text snippet. A single document (and its text + snippet) will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for any + text snippet failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + either the “id” : “” (in case of inline) or the document proto (in + case of document) but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Tables: Output depends on whether [gcs_destinati + on][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destinati + on] or [bigquery_destination][google.cloud.automl.v1beta1.BatchPredic + tOutputConfig.bigquery_destination] is set (either is allowed). GCS + case: In the created directory files ``tables_1.csv``, + ``tables_2.csv``,…, ``tables_N.csv`` will be created, where N may be + 1, and depends on the total number of the successfully predicted rows. 
+ For all CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta + 1.TablesModelMetadata.prediction_type]: Each .csv file will contain a + header, listing all columns’ [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - given on input followed by M target column names in the format of - - "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>\_\_score" - where M is the number of distinct target values, i.e. number of distinct - values in the target column of the table used to train the model. - Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, columns having the - corresponding prediction + given on input followed by M target column names in the format of "<[ + target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.t + arget_column_spec] [display_name][google.cloud.automl.v1beta1.ColumnS + pec.display_name]>\_\_score" where M is the number of distinct target + values, i.e. number of distinct values in the target column of the + table used to train the model. Subsequent lines will contain the + respective values of successfully predicted rows, with the last, + i.e. the target, columns having the corresponding prediction [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. 
For - REGRESSION and FORECASTING - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns’ + REGRESSION and FORECASTING [prediction_type-s][google.cloud.automl.v1 + beta1.TablesModelMetadata.prediction_type]: Each .csv file will + contain a header, listing all columns’ [display_name-s][google.cloud.automl.v1beta1.display_name] given on - input followed by the predicted target column with name in the format of - - "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - + input followed by the predicted target column with name in the format + of "predicted_<[target_column_specs][google.cloud.automl.v1beta1.Tabl + esModelMetadata.target_column_spec] [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" Subsequent lines will contain the respective values of successfully predicted rows, with the last, i.e. the target, column having the predicted target value. If prediction for any rows failed, then an additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will - be created (N depends on total number of failed rows). These files will - have analogous format as ``tables_*.csv``, but always with a single - target column having - - ```google.rpc.Status`` `__ + be created (N depends on total number of failed rows). These files + will have analogous format as ``tables_*.csv``, but always with a + single target column having ```google.rpc.Status`` `__ represented as a JSON string, and containing only ``code`` and - ``message``. BigQuery case: - - [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - ``prediction__`` where - will be made BigQuery-dataset-name compatible (e.g. 
most special - characters will become underscores), and timestamp will be in - YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset two - tables will be created, ``predictions``, and ``errors``. The - ``predictions`` table’s column names will be the input columns’ - + ``message``. BigQuery case: [bigquery_destination][google.cloud.autom + l.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery + project must be set. In the given project a new dataset will be + created with name ``prediction__`` where will be made BigQuery-dataset-name compatible + (e.g. most special characters will become underscores), and timestamp + will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the + dataset two tables will be created, ``predictions``, and ``errors``. + The ``predictions`` table’s column names will be the input columns’ [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - followed by the target column with name in the format of - - "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - + followed by the target column with name in the format of "predicted_< + [target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata. + target_column_spec] [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" The input feature columns will contain the respective values of successfully predicted rows, with the target column having an ARRAY of - [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. The ``errors`` table contains rows for which the prediction has failed, it has analogous input columns while the target column name is in the - format of - - "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - + format of "errors_<[target_column_specs][google.cloud.automl.v1beta1. 
+ TablesModelMetadata.target_column_spec] [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", - and as a value has - - ```google.rpc.Status`` `__ - represented as a STRUCT, and containing only ``code`` and ``message``. - - + and as a value has ```google.rpc.Status`` `__ represented + as a STRUCT, and containing only ``code`` and ``message``. Attributes: destination: Required. The destination of the output. @@ -1735,8 +1637,6 @@ "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """Output configuration for ModelExport Action. - - Attributes: destination: Required. The destination of the output. @@ -1796,35 +1696,28 @@ { "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """Output configuration for ExportEvaluatedExamples Action. - Note that this call is available only for 30 days since the moment the - model was evaluated. The output depends on the domain, as follows (note - that only examples from the TEST set are exported): - - - For Tables: - - [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - - ``export_evaluated_examples__`` - where will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in + "__doc__": """Output configuration for ExportEvaluatedExamples Action. Note that + this call is available only for 30 days since the moment the model was + evaluated. The output depends on the domain, as follows (note that + only examples from the TEST set are exported): - For Tables: [bigqu + ery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_des + tination] pointing to a BigQuery project must be set. 
In the given + project a new dataset will be created with name + ``export_evaluated_examples__`` where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset an - ``evaluated_examples`` table will be created. It will have all the same - columns as the - - [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id] - of the [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from - which the model was created, as they were at the moment of model’s - evaluation (this includes the target column with its ground truth), - followed by a column called “predicted\_”. That last column will contain - the model’s prediction result for each respective row, given as ARRAY of + ``evaluated_examples`` table will be created. It will have all the + same columns as the [primary_table][google.cloud.automl.v1beta1.Table + sDatasetMetadata.primary_table_spec_id] of the + [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which the + model was created, as they were at the moment of model’s evaluation + (this includes the target column with its ground truth), followed by a + column called “predicted\_”. That last column will contain the model’s + prediction result for each respective row, given as ARRAY of [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. - - Attributes: destination: Required. The destination of the output. @@ -1843,8 +1736,6 @@ "DESCRIPTOR": _GCSSOURCE, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The Google Cloud Storage location for the input content. - - Attributes: input_uris: Required. 
Google Cloud Storage URIs to input files, up to 2000 @@ -1863,8 +1754,6 @@ "DESCRIPTOR": _BIGQUERYSOURCE, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The BigQuery location for the input content. - - Attributes: input_uri: Required. BigQuery URI to a table, up to 2000 characters long. @@ -1882,10 +1771,8 @@ { "DESCRIPTOR": _GCSDESTINATION, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", - "__doc__": """The Google Cloud Storage location where the output is to - be written to. - - + "__doc__": """The Google Cloud Storage location where the output is to be written + to. Attributes: output_uri_prefix: Required. Google Cloud Storage URI to output directory, up to @@ -1906,8 +1793,6 @@ "DESCRIPTOR": _BIGQUERYDESTINATION, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The BigQuery location for the output content. - - Attributes: output_uri: Required. BigQuery URI to a project, up to 2000 characters @@ -1925,8 +1810,6 @@ "DESCRIPTOR": _GCRDESTINATION, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The GCR location where the image must be pushed to. - - Attributes: output_uri: Required. Google Contained Registry URI of the new image, up diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py index 73e1f8e3..4152dd72 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py @@ -393,8 +393,6 @@ "DESCRIPTOR": _MODELEVALUATION, "__module__": "google.cloud.automl_v1beta1.proto.model_evaluation_pb2", "__doc__": """Evaluation results of a model. - - Attributes: metrics: Output only. Problem type specific evaluation metrics. 
diff --git a/google/cloud/automl_v1beta1/proto/model_pb2.py b/google/cloud/automl_v1beta1/proto/model_pb2.py index 284eeb3e..d30bd8dd 100644 --- a/google/cloud/automl_v1beta1/proto/model_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_pb2.py @@ -494,8 +494,6 @@ "DESCRIPTOR": _MODEL, "__module__": "google.cloud.automl_v1beta1.proto.model_pb2", "__doc__": """API proto representing a trained machine learning model. - - Attributes: model_metadata: Required. The model metadata that is specific to the problem diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py index 10b90dcd..212b1aaa 100644 --- a/google/cloud/automl_v1beta1/proto/operations_pb2.py +++ b/google/cloud/automl_v1beta1/proto/operations_pb2.py @@ -990,10 +990,8 @@ { "DESCRIPTOR": _OPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Metadata used across all long running operations returned - by AutoML API. - - + "__doc__": """Metadata used across all long running operations returned by AutoML + API. Attributes: details: Ouptut only. Details of specific operation. Even if this field @@ -1042,10 +1040,7 @@ { "DESCRIPTOR": _DELETEOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of operations that perform deletes of any - entities. - - """, + "__doc__": """Details of operations that perform deletes of any entities.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteOperationMetadata) }, ) @@ -1057,9 +1052,7 @@ { "DESCRIPTOR": _DEPLOYMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of DeployModel operation. 
- - """, + "__doc__": """Details of DeployModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelOperationMetadata) }, ) @@ -1071,9 +1064,7 @@ { "DESCRIPTOR": _UNDEPLOYMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of UndeployModel operation. - - """, + "__doc__": """Details of UndeployModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelOperationMetadata) }, ) @@ -1085,9 +1076,7 @@ { "DESCRIPTOR": _CREATEMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of CreateModel operation. - - """, + "__doc__": """Details of CreateModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelOperationMetadata) }, ) @@ -1099,9 +1088,7 @@ { "DESCRIPTOR": _IMPORTDATAOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Details of ImportData operation. - - """, + "__doc__": """Details of ImportData operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataOperationMetadata) }, ) @@ -1119,8 +1106,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Further describes this export data’s output. Supplements [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. - - Attributes: output_location: The output location to which the exported data is written. @@ -1138,8 +1123,6 @@ "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of ExportData operation. - - Attributes: output_info: Output only. 
Information further describing this export data’s @@ -1161,11 +1144,9 @@ { "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes this batch predict’s output. Supplements - - [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. - - + "__doc__": """Further describes this batch predict’s output. Supplements [BatchPred + ictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig] + . Attributes: output_location: The output location into which prediction output is written. @@ -1183,8 +1164,6 @@ "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of BatchPredict operation. - - Attributes: input_config: Output only. The input config that was given upon starting @@ -1209,11 +1188,8 @@ { "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes the output of model export. Supplements - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - - + "__doc__": """Further describes the output of model export. Supplements [ModelExpor + tOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. Attributes: gcs_output_directory: The full path of the Google Cloud Storage directory created, @@ -1225,8 +1201,6 @@ "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of ExportModel operation. - - Attributes: output_info: Output only. 
Information further describing the output of this @@ -1248,12 +1222,9 @@ { "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", - "__doc__": """Further describes the output of the evaluated examples - export. Supplements - - [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. - - + "__doc__": """Further describes the output of the evaluated examples export. + Supplements [ExportEvaluatedExamplesOutputConfig][google.cloud.automl + .v1beta1.ExportEvaluatedExamplesOutputConfig]. Attributes: bigquery_output_dataset: The path of the BigQuery dataset created, in @@ -1266,8 +1237,6 @@ "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of EvaluatedExamples operation. - - Attributes: output_info: Output only. Information further describing the output of this diff --git a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py index b27a20a8..56ac149c 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py @@ -618,10 +618,8 @@ ), "DESCRIPTOR": _PREDICTREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", - "__doc__": """Request message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - - + "__doc__": """Request message for [PredictionService.Predict][google.cloud.automl.v1 + beta1.PredictionService.Predict]. Attributes: name: Required. Name of the model requested to serve the prediction. 
@@ -666,10 +664,8 @@ ), "DESCRIPTOR": _PREDICTRESPONSE, "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", - "__doc__": """Response message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - - + "__doc__": """Response message for [PredictionService.Predict][google.cloud.automl.v + 1beta1.PredictionService.Predict]. Attributes: payload: Prediction result. Translation and Text Sentiment will return @@ -716,10 +712,8 @@ ), "DESCRIPTOR": _BATCHPREDICTREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", - "__doc__": """Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - - + "__doc__": """Request message for [PredictionService.BatchPredict][google.cloud.auto + ml.v1beta1.PredictionService.BatchPredict]. Attributes: name: Required. Name of the model requested to serve the batch @@ -811,10 +805,8 @@ "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", "__doc__": """Result of the Batch Predict. This message is returned in [response][google.longrunning.Operation.response] of the operation - returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - - + returned by the [PredictionService.BatchPredict][google.cloud.automl.v + 1beta1.PredictionService.BatchPredict]. Attributes: metadata: Additional domain-specific prediction response metadata. - diff --git a/google/cloud/automl_v1beta1/proto/ranges_pb2.py b/google/cloud/automl_v1beta1/proto/ranges_pb2.py index 99647c11..951fe7a7 100644 --- a/google/cloud/automl_v1beta1/proto/ranges_pb2.py +++ b/google/cloud/automl_v1beta1/proto/ranges_pb2.py @@ -91,8 +91,6 @@ "DESCRIPTOR": _DOUBLERANGE, "__module__": "google.cloud.automl_v1beta1.proto.ranges_pb2", "__doc__": """A range between two double numbers. - - Attributes: start: Start of the range, inclusive. 
diff --git a/google/cloud/automl_v1beta1/proto/regression_pb2.py b/google/cloud/automl_v1beta1/proto/regression_pb2.py index f41b338b..3e41c748 100644 --- a/google/cloud/automl_v1beta1/proto/regression_pb2.py +++ b/google/cloud/automl_v1beta1/proto/regression_pb2.py @@ -147,8 +147,6 @@ "DESCRIPTOR": _REGRESSIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.regression_pb2", "__doc__": """Metrics for regression problems. - - Attributes: root_mean_squared_error: Output only. Root Mean Squared Error (RMSE). diff --git a/google/cloud/automl_v1beta1/proto/service_pb2.py b/google/cloud/automl_v1beta1/proto/service_pb2.py index 9143d6ed..5188ce97 100644 --- a/google/cloud/automl_v1beta1/proto/service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/service_pb2.py @@ -2001,10 +2001,8 @@ { "DESCRIPTOR": _CREATEDATASETREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. - - + "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1 + .AutoMl.CreateDataset]. Attributes: parent: Required. The resource name of the project to create the @@ -2025,8 +2023,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - - Attributes: name: Required. The resource name of the dataset to retrieve. @@ -2042,10 +2038,8 @@ { "DESCRIPTOR": _LISTDATASETSREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - - + "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1. + AutoMl.ListDatasets]. Attributes: parent: Required. 
The resource name of the project from which to list @@ -2078,10 +2072,8 @@ { "DESCRIPTOR": _LISTDATASETSRESPONSE, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - - + "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1 + .AutoMl.ListDatasets]. Attributes: datasets: The datasets read. @@ -2101,10 +2093,8 @@ { "DESCRIPTOR": _UPDATEDATASETREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - - + "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1 + .AutoMl.UpdateDataset] Attributes: dataset: Required. The dataset which replaces the resource on the @@ -2123,10 +2113,8 @@ { "DESCRIPTOR": _DELETEDATASETREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - - + "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1 + .AutoMl.DeleteDataset]. Attributes: name: Required. The resource name of the dataset to delete. @@ -2144,8 +2132,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - - Attributes: name: Required. Dataset name. Dataset must already exist. All @@ -2167,8 +2153,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - - Attributes: name: Required. The resource name of the dataset. 
@@ -2186,10 +2170,8 @@ { "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - - + "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1b + eta1.AutoMl.GetAnnotationSpec]. Attributes: name: Required. The resource name of the annotation spec to @@ -2206,10 +2188,8 @@ { "DESCRIPTOR": _GETTABLESPECREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - - + "__doc__": """Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1. + AutoMl.GetTableSpec]. Attributes: name: Required. The resource name of the table spec to retrieve. @@ -2227,10 +2207,8 @@ { "DESCRIPTOR": _LISTTABLESPECSREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - - + "__doc__": """Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta + 1.AutoMl.ListTableSpecs]. Attributes: parent: Required. The resource name of the dataset to list table specs @@ -2261,10 +2239,8 @@ { "DESCRIPTOR": _LISTTABLESPECSRESPONSE, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - - + "__doc__": """Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1bet + a1.AutoMl.ListTableSpecs]. Attributes: table_specs: The table specs read. 
@@ -2284,10 +2260,8 @@ { "DESCRIPTOR": _UPDATETABLESPECREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - - + "__doc__": """Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1bet + a1.AutoMl.UpdateTableSpec] Attributes: table_spec: Required. The table spec which replaces the resource on the @@ -2306,10 +2280,8 @@ { "DESCRIPTOR": _GETCOLUMNSPECREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - - + "__doc__": """Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1 + .AutoMl.GetColumnSpec]. Attributes: name: Required. The resource name of the column spec to retrieve. @@ -2327,10 +2299,8 @@ { "DESCRIPTOR": _LISTCOLUMNSPECSREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - - + "__doc__": """Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1bet + a1.AutoMl.ListColumnSpecs]. Attributes: parent: Required. The resource name of the table spec to list column @@ -2362,10 +2332,8 @@ { "DESCRIPTOR": _LISTCOLUMNSPECSRESPONSE, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - - + "__doc__": """Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1be + ta1.AutoMl.ListColumnSpecs]. Attributes: column_specs: The column specs read. 
@@ -2385,10 +2353,8 @@ { "DESCRIPTOR": _UPDATECOLUMNSPECREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - - + "__doc__": """Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1be + ta1.AutoMl.UpdateColumnSpec] Attributes: column_spec: Required. The column spec which replaces the resource on the @@ -2409,8 +2375,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - - Attributes: parent: Required. Resource name of the parent project where the model @@ -2431,8 +2395,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - - Attributes: name: Required. Resource name of the model. @@ -2450,8 +2412,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Attributes: parent: Required. Resource name of the project, from which to list the @@ -2486,8 +2446,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Attributes: model: List of models in the requested page. @@ -2509,8 +2467,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - - Attributes: name: Required. Resource name of the model being deleted. @@ -2528,8 +2484,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. 
- - Attributes: model_deployment_metadata: The per-domain specific deployment parameters. @@ -2551,10 +2505,8 @@ { "DESCRIPTOR": _UNDEPLOYMODELREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - - + "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1 + .AutoMl.UndeployModel]. Attributes: name: Required. Resource name of the model to undeploy. @@ -2572,10 +2524,8 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an error code will be - returned. - - + Models need to be enabled for exporting, otherwise an error code will + be returned. Attributes: name: Required. The resource name of the model to export. @@ -2593,10 +2543,8 @@ { "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - - + "__doc__": """Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.auto + ml.v1beta1.AutoMl.ExportEvaluatedExamples]. Attributes: name: Required. The resource name of the model whose evaluated @@ -2615,10 +2563,8 @@ { "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - - + "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 + beta1.AutoMl.GetModelEvaluation]. Attributes: name: Required. Resource name for the model evaluation. 
@@ -2634,10 +2580,8 @@ { "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - - + "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. + v1beta1.AutoMl.ListModelEvaluations]. Attributes: parent: Required. Resource name of the model to list the model @@ -2673,10 +2617,8 @@ { "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", - "__doc__": """Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - - + "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl + .v1beta1.AutoMl.ListModelEvaluations]. Attributes: model_evaluation: List of model evaluations in the requested page. diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py index b38c3320..9c8f872e 100644 --- a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py @@ -191,15 +191,13 @@ { "DESCRIPTOR": _TABLESPEC, "__module__": "google.cloud.automl_v1beta1.proto.table_spec_pb2", - "__doc__": """A specification of a relational table. The table’s schema - is represented via its child column specs. It is pre-populated as part - of ImportData by schema inference algorithm, the version of which is a - required parameter of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the data in the + "__doc__": """A specification of a relational table. The table’s schema is + represented via its child column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version of which is a + required parameter of ImportData InputConfig. 
Note: While working with + a table, at times the schema may be inconsistent with the data in the table (e.g. string in a FLOAT64 column). The consistency validation is done upon creation of a model. Used by: \* Tables - - Attributes: name: Output only. The resource name of the table spec. Form: ``pro diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py index 32fe800b..0be54c2a 100644 --- a/google/cloud/automl_v1beta1/proto/tables_pb2.py +++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py @@ -700,8 +700,6 @@ "DESCRIPTOR": _TABLESDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """Metadata for a dataset used for AutoML Tables. - - Attributes: primary_table_spec_id: Output only. The table_spec_id of the primary table of this @@ -770,8 +768,6 @@ "DESCRIPTOR": _TABLESMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """Model metadata specific to AutoML Tables. - - Attributes: additional_optimization_objective_config: Additional optimization objective configuration. Required for @@ -864,8 +860,6 @@ "DESCRIPTOR": _TABLESANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """Contains annotation details specific to Tables. - - Attributes: score: Output only. A confidence estimate between 0.0 and 1.0, @@ -916,10 +910,8 @@ { "DESCRIPTOR": _TABLESMODELCOLUMNINFO, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", - "__doc__": """An information specific to given column and Tables Model, - in context of the Model and the predictions created by it. - - + "__doc__": """An information specific to given column and Tables Model, in context + of the Model and the predictions created by it. Attributes: column_spec_name: Output only. The name of the ColumnSpec describing the column. 
diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2.py b/google/cloud/automl_v1beta1/proto/temporal_pb2.py index 668bc578..80e8359d 100644 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2.py +++ b/google/cloud/automl_v1beta1/proto/temporal_pb2.py @@ -102,8 +102,6 @@ "__module__": "google.cloud.automl_v1beta1.proto.temporal_pb2", "__doc__": """A time period inside of an example that has a time dimension (e.g. video). - - Attributes: start_time_offset: Start of the time segment (inclusive), represented as the diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py index bdf49bf5..7e7f80f1 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py @@ -274,8 +274,6 @@ "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", "__doc__": """Annotation for identifying spans of text. - - Attributes: annotation: Required. Text extraction annotations can either be a text @@ -304,8 +302,6 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", "__doc__": """Metrics for a single confidence threshold. - - Attributes: confidence_threshold: Output only. The confidence threshold value used to compute @@ -324,8 +320,6 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", "__doc__": """Model evaluation metrics for text extraction problems. - - Attributes: au_prc: Output only. The Area under precision recall curve metric. 
diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py index 2418465c..08d48292 100644 --- a/google/cloud/automl_v1beta1/proto/text_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_pb2.py @@ -244,8 +244,6 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", "__doc__": """Dataset metadata for classification. - - Attributes: classification_type: Required. Type of the classification problem. @@ -262,8 +260,6 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", "__doc__": """Model metadata that is specific to text classification. - - Attributes: classification_type: Output only. Classification type of the dataset used to train @@ -280,9 +276,7 @@ { "DESCRIPTOR": _TEXTEXTRACTIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Dataset metadata that is specific to text extraction - - """, + "__doc__": """Dataset metadata that is specific to text extraction""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionDatasetMetadata) }, ) @@ -294,9 +288,7 @@ { "DESCRIPTOR": _TEXTEXTRACTIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text extraction. - - """, + "__doc__": """Model metadata that is specific to text extraction.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionModelMetadata) }, ) @@ -309,8 +301,6 @@ "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", "__doc__": """Dataset metadata for text sentiment. - - Attributes: sentiment_max: Required. 
A sentiment is expressed as an integer ordinal, @@ -331,9 +321,7 @@ { "DESCRIPTOR": _TEXTSENTIMENTMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", - "__doc__": """Model metadata that is specific to text sentiment. - - """, + "__doc__": """Model metadata that is specific to text sentiment.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentModelMetadata) }, ) diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py index 5822253c..150c0245 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py @@ -108,10 +108,8 @@ { "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.automl_v1beta1.proto.text_segment_pb2", - "__doc__": """A contiguous part of a text (string), assuming it has an - UTF-8 NFC encoding. - - + "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC + encoding. Attributes: content: Output only. The content of the TextSegment. diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py index d4c0c728..737523cd 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py @@ -270,8 +270,6 @@ "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", "__doc__": """Contains annotation details specific to text sentiment. - - Attributes: sentiment: Output only. The sentiment with the semantic, as given to the @@ -302,8 +300,6 @@ "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", "__doc__": """Model evaluation metrics for text sentiment problems. - - Attributes: precision: Output only. Precision. 
diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2.py b/google/cloud/automl_v1beta1/proto/translation_pb2.py index 539d700c..411a25bb 100644 --- a/google/cloud/automl_v1beta1/proto/translation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/translation_pb2.py @@ -282,8 +282,6 @@ "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Dataset metadata that is specific to translation. - - Attributes: source_language_code: Required. The BCP-47 language code of the source language. @@ -302,8 +300,6 @@ "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Evaluation metrics for the dataset. - - Attributes: bleu_score: Output only. BLEU score. @@ -322,8 +318,6 @@ "DESCRIPTOR": _TRANSLATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Model metadata that is specific to translation. - - Attributes: base_model: The resource name of the model to use as a baseline to train @@ -349,8 +343,6 @@ "DESCRIPTOR": _TRANSLATIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Annotation details specific to translation. - - Attributes: translated_content: Output only . The translated content. diff --git a/google/cloud/automl_v1beta1/proto/video_pb2.py b/google/cloud/automl_v1beta1/proto/video_pb2.py index da658ee0..e1903ba3 100644 --- a/google/cloud/automl_v1beta1/proto/video_pb2.py +++ b/google/cloud/automl_v1beta1/proto/video_pb2.py @@ -130,10 +130,8 @@ { "DESCRIPTOR": _VIDEOCLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Dataset metadata specific to video classification. All - Video Classification datasets are treated as multi label. - - """, + "__doc__": """Dataset metadata specific to video classification. 
All Video + Classification datasets are treated as multi label.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata) }, ) @@ -145,9 +143,7 @@ { "DESCRIPTOR": _VIDEOOBJECTTRACKINGDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Dataset metadata specific to video object tracking. - - """, + "__doc__": """Dataset metadata specific to video object tracking.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata) }, ) @@ -159,9 +155,7 @@ { "DESCRIPTOR": _VIDEOCLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Model metadata specific to video classification. - - """, + "__doc__": """Model metadata specific to video classification.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationModelMetadata) }, ) @@ -173,9 +167,7 @@ { "DESCRIPTOR": _VIDEOOBJECTTRACKINGMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", - "__doc__": """Model metadata specific to video object tracking. 
- - """, + "__doc__": """Model metadata specific to video object tracking.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata) }, ) diff --git a/synth.metadata b/synth.metadata index 1e9b3275..72f56270 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e0f9d9e1f9de890db765be46f45ca8490723e3eb", - "internalRef": "309824146" + "sha": "dec3204175104cef49bf21d685d5517caaf0058f", + "internalRef": "312689208" } }, { From 2c1ab16e33663ceae9d8d2a9daef89ed26c49283 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:44:28 -0700 Subject: [PATCH 3/9] bazel: update protobuf, rules_go, gazelle, and gapic-generator-go versions - protobuf v3.12.1 - rules_go v0.23.0 - gazelle v0.21.0 - gapic-generator-go v0.14.1 PiperOrigin-RevId: 313460921 Source-Author: Google APIs Source-Date: Wed May 27 14:10:16 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: c4e37010d74071851ff24121f522e802231ac86e Source-Link: https://github.com/googleapis/googleapis/commit/c4e37010d74071851ff24121f522e802231ac86e --- .../automl_v1/proto/annotation_payload_pb2.py | 10 ++ .../automl_v1/proto/annotation_spec_pb2.py | 5 + .../automl_v1/proto/classification_pb2.py | 47 ++++++- .../cloud/automl_v1/proto/data_items_pb2.py | 105 +++++++++++++-- google/cloud/automl_v1/proto/dataset_pb2.py | 19 +++ google/cloud/automl_v1/proto/detection_pb2.py | 17 +++ google/cloud/automl_v1/proto/geometry_pb2.py | 6 + google/cloud/automl_v1/proto/image_pb2.py | 23 ++++ google/cloud/automl_v1/proto/io_pb2.py | 31 +++++ .../automl_v1/proto/model_evaluation_pb2.py | 13 ++ google/cloud/automl_v1/proto/model_pb2.py | 36 +++++- .../cloud/automl_v1/proto/operations_pb2.py | 37 ++++++ .../automl_v1/proto/prediction_service_pb2.py | 31 +++++ google/cloud/automl_v1/proto/service_pb2.py | 84 ++++++++++++ .../automl_v1/proto/text_extraction_pb2.py | 
13 ++ google/cloud/automl_v1/proto/text_pb2.py | 10 ++ .../cloud/automl_v1/proto/text_segment_pb2.py | 5 + .../automl_v1/proto/text_sentiment_pb2.py | 12 ++ .../cloud/automl_v1/proto/translation_pb2.py | 13 ++ .../proto/annotation_payload_pb2.py | 13 ++ .../proto/annotation_spec_pb2.py | 5 + .../proto/classification_pb2.py | 52 +++++++- .../automl_v1beta1/proto/column_spec_pb2.py | 11 ++ .../automl_v1beta1/proto/data_items_pb2.py | 110 ++++++++++++++-- .../automl_v1beta1/proto/data_stats_pb2.py | 50 ++++++++ .../automl_v1beta1/proto/data_types_pb2.py | 57 ++++++++- .../cloud/automl_v1beta1/proto/dataset_pb2.py | 18 +++ .../automl_v1beta1/proto/detection_pb2.py | 27 ++++ .../automl_v1beta1/proto/geometry_pb2.py | 6 + .../cloud/automl_v1beta1/proto/image_pb2.py | 23 ++++ google/cloud/automl_v1beta1/proto/io_pb2.py | 45 +++++++ .../proto/model_evaluation_pb2.py | 15 +++ .../cloud/automl_v1beta1/proto/model_pb2.py | 34 ++++- .../automl_v1beta1/proto/operations_pb2.py | 42 ++++++ .../proto/prediction_service_pb2.py | 31 +++++ .../cloud/automl_v1beta1/proto/ranges_pb2.py | 4 + .../automl_v1beta1/proto/regression_pb2.py | 7 + .../cloud/automl_v1beta1/proto/service_pb2.py | 120 ++++++++++++++++++ .../automl_v1beta1/proto/table_spec_pb2.py | 9 ++ .../cloud/automl_v1beta1/proto/tables_pb2.py | 32 +++++ .../automl_v1beta1/proto/temporal_pb2.py | 4 + .../proto/text_extraction_pb2.py | 13 ++ google/cloud/automl_v1beta1/proto/text_pb2.py | 10 ++ .../automl_v1beta1/proto/text_segment_pb2.py | 5 + .../proto/text_sentiment_pb2.py | 13 ++ .../automl_v1beta1/proto/translation_pb2.py | 13 ++ .../cloud/automl_v1beta1/proto/video_pb2.py | 5 + synth.metadata | 4 +- 48 files changed, 1259 insertions(+), 36 deletions(-) diff --git a/google/cloud/automl_v1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1/proto/annotation_payload_pb2.py index e81ba3d7..c0ad5894 100644 --- a/google/cloud/automl_v1/proto/annotation_payload_pb2.py +++ 
b/google/cloud/automl_v1/proto/annotation_payload_pb2.py @@ -36,6 +36,7 @@ package="google.cloud.automl.v1", syntax="proto3", serialized_options=b"\n\032com.google.cloud.automl.v1P\001Z\n\x0cinput_config\x18\x03 \x01(\x0b\x32#.google.cloud.automl.v1.InputConfigB\x03\xe0\x41\x02"\x8a\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12@\n\routput_config\x18\x03 \x01(\x0b\x32$.google.cloud.automl.v1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"\x82\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"[\n\x12ListModelsResponse\x12,\n\x05model\x18\x01 \x03(\x0b\x32\x1d.google.cloud.automl.v1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"}\n\x12UpdateModelRequest\x12\x31\n\x05model\x18\x01 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"\xe3\x02\n\x12\x44\x65ployModelRequest\x12\x7f\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32\x43.google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12|\n.image_classification_model_deployment_metadata\x18\x04 
\x01(\x0b\x32\x42.google.cloud.automl.v1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x94\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12K\n\routput_config\x18\x03 \x01(\x0b\x32/.google.cloud.automl.v1.ModelExportOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x8e\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x13\n\x06\x66ilter\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"z\n\x1cListModelEvaluationsResponse\x12\x41\n\x10model_evaluation\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xe8\x1b\n\x06\x41utoMl\x12\xcb\x01\n\rCreateDataset\x12,.google.cloud.automl.v1.CreateDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x37",/v1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\xca\x41\x1c\n\x07\x44\x61taset\x12\x11OperationMetadata\x12\x95\x01\n\nGetDataset\x12).google.cloud.automl.v1.GetDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset";\x82\xd3\xe4\x93\x02.\x12,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xa8\x01\n\x0cListDatasets\x12+.google.cloud.automl.v1.ListDatasetsRequest\x1a,.google.cloud.automl.v1.ListDatasetsResponse"=\x82\xd3\xe4\x93\x02.\x12,/v1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbb\x01\n\rUpdateDataset\x12,.google.cloud.automl.v1.UpdateDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset"[\x82\xd3\xe4\x93\x02?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x13\x64\x61taset,update_mask\x12\xc6\x01\n\rDeleteDataset\x12,.google.cloud.automl.v1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"h\x82\xd3\xe4\x93\x02.*,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdc\x01\n\nImportData\x12).google.cloud.automl.v1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\x01*\xda\x41\x11name,input_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdd\x01\n\nExportData\x12).google.cloud.automl.v1.ExportDataRequest\x1a\x1d.google.longrunning.Operation"\x84\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbc\x01\n\x11GetAnnotationSpec\x12\x30.google.cloud.automl.v1.GetAnnotationSpecRequest\x1a&.google.cloud.automl.v1.AnnotationSpec"M\x82\xd3\xe4\x93\x02@\x12>/v1/{name
=projects/*/locations/*/datasets/*/annotationSpecs/*}\xda\x41\x04name\x12\xbf\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"e\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x8d\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"9\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xa0\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse";\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xc0\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"f\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xad\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"S\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\xda\x41\x11model,update_mask\x12\xca\x01\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"p\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd0\x01\n\rUndeployModel\x12,.google.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"r\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd8\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"~\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x
12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbe\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"L\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xd8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"U\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\rparent,filter\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateModel", @@ -1940,6 +2014,7 @@ input_type=_CREATEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\0023"*/v1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModel", @@ -1949,6 +2024,7 @@ input_type=_GETMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, serialized_options=b"\202\323\344\223\002,\022*/v1/{name=projects/*/locations/*/models/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModels", @@ -1958,6 +2034,7 @@ input_type=_LISTMODELSREQUEST, output_type=_LISTMODELSRESPONSE, serialized_options=b"\202\323\344\223\002,\022*/v1/{parent=projects/*/locations/*}/models\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteModel", @@ -1967,6 +2044,7 @@ input_type=_DELETEMODELREQUEST, 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b"\202\323\344\223\002,**/v1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateModel", @@ -1976,6 +2054,7 @@ input_type=_UPDATEMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, serialized_options=b"\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model\332A\021model,update_mask", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeployModel", @@ -1985,6 +2064,7 @@ input_type=_DEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UndeployModel", @@ -1994,6 +2074,7 @@ input_type=_UNDEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportModel", @@ -2003,6 +2084,7 @@ input_type=_EXPORTMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModelEvaluation", @@ -2012,6 +2094,7 @@ input_type=_GETMODELEVALUATIONREQUEST, 
output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, serialized_options=b"\202\323\344\223\002?\022=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModelEvaluations", @@ -2021,6 +2104,7 @@ input_type=_LISTMODELEVALUATIONSREQUEST, output_type=_LISTMODELEVALUATIONSRESPONSE, serialized_options=b"\202\323\344\223\002?\022=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\rparent,filter", + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py index c23e83ba..79caedcb 100644 --- a/google/cloud/automl_v1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1/proto/text_extraction_pb2.py @@ -23,6 +23,7 @@ package="google.cloud.automl.v1", syntax="proto3", serialized_options=b"\n\032com.google.cloud.automl.v1P\001Z\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 \x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 
\x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, @@ -36,6 +37,7 @@ full_name="google.cloud.automl.v1beta1.ClassificationType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="CLASSIFICATION_TYPE_UNSPECIFIED", @@ -43,12 +45,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MULTICLASS", index=1, number=1, serialized_options=None, type=None + name="MULTICLASS", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MULTILABEL", index=2, number=2, serialized_options=None, type=None + name="MULTILABEL", + index=2, + number=2, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -70,6 +83,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="score", @@ -88,6 +102,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -109,6 +124,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="type", @@ -127,6 +143,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="classification_annotation", @@ -145,6 +162,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_segment", @@ -163,6 +181,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -184,6 +203,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="confidence_threshold", @@ -202,6 +222,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="position_threshold", @@ -220,6 +241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -238,6 +260,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision", @@ -256,6 +279,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="false_positive_rate", @@ -274,6 +298,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -292,6 +317,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall_at1", @@ -310,6 +336,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision_at1", @@ -328,6 +355,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_positive_rate_at1", @@ -346,6 +374,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score_at1", @@ -364,6 +393,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="true_positive_count", @@ -382,6 +412,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_positive_count", @@ -400,6 +431,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_negative_count", @@ -418,6 +450,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="true_negative_count", @@ -436,6 +469,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -456,6 +490,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, 
fields=[ _descriptor.FieldDescriptor( name="example_count", @@ -474,6 +509,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -494,6 +530,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -512,6 +549,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -530,6 +568,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row", @@ -548,6 +587,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -568,6 +608,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="au_prc", @@ -586,6 +627,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="base_au_prc", @@ -604,6 +646,7 @@ extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="au_roc", @@ -622,6 +665,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="log_loss", @@ -640,6 +684,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence_metrics_entry", @@ -658,6 +703,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="confusion_matrix", @@ -676,6 +722,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -694,6 +741,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py index f13fb533..deda85df 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py @@ -27,6 +27,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/column_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_types.proto\x1a\x1cgoogle/api/annotations.proto"\x9b\x04\n\nColumnSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\tdata_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\ndata_stats\x18\x04 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats\x12X\n\x16top_correlated_columns\x18\x05 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t\x1at\n\x10\x43orrelatedColumn\x12\x16\n\x0e\x63olumn_spec_id\x18\x01 \x01(\t\x12H\n\x11\x63orrelation_stats\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x94\x01\xea\x41\x90\x01\n 
automl.googleapis.com/ColumnSpec\x12lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -43,6 +44,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec_id", @@ -61,6 +63,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="correlation_stats", @@ -79,6 +82,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -99,6 +103,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -117,6 +122,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="data_type", @@ -135,6 +141,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -153,6 +160,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="data_stats", @@ -171,6 +179,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="top_correlated_columns", @@ -189,6 +198,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="etag", @@ -207,6 +217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py index e0770ccd..dd5df579 100644 --- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_items_pb2.py @@ -35,6 +35,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_items.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\x7f\n\x05Image\x12\x15\n\x0bimage_bytes\x18\x01 \x01(\x0cH\x00\x12@\n\x0cinput_config\x18\x06 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigH\x00\x12\x15\n\rthumbnail_uri\x18\x04 \x01(\tB\x06\n\x04\x64\x61ta"F\n\x0bTextSnippet\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x12\x13\n\x0b\x63ontent_uri\x18\x04 \x01(\t"\xef\x01\n\x12\x44ocumentDimensions\x12S\n\x04unit\x18\x01 \x01(\x0e\x32\x45.google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit\x12\r\n\x05width\x18\x02 \x01(\x02\x12\x0e\n\x06height\x18\x03 
\x01(\x02"e\n\x15\x44ocumentDimensionUnit\x12\'\n#DOCUMENT_DIMENSION_UNIT_UNSPECIFIED\x10\x00\x12\x08\n\x04INCH\x10\x01\x12\x0e\n\nCENTIMETER\x10\x02\x12\t\n\x05POINT\x10\x03"\xf9\x05\n\x08\x44ocument\x12\x46\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.DocumentInputConfig\x12?\n\rdocument_text\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippet\x12<\n\x06layout\x18\x03 \x03(\x0b\x32,.google.cloud.automl.v1beta1.Document.Layout\x12L\n\x13\x64ocument_dimensions\x18\x04 \x01(\x0b\x32/.google.cloud.automl.v1beta1.DocumentDimensions\x12\x12\n\npage_count\x18\x05 \x01(\x05\x1a\xc3\x03\n\x06Layout\x12>\n\x0ctext_segment\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegment\x12\x13\n\x0bpage_number\x18\x02 \x01(\x05\x12@\n\rbounding_poly\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12W\n\x11text_segment_type\x18\x04 \x01(\x0e\x32<.google.cloud.automl.v1beta1.Document.Layout.TextSegmentType"\xc8\x01\n\x0fTextSegmentType\x12!\n\x1dTEXT_SEGMENT_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05TOKEN\x10\x01\x12\r\n\tPARAGRAPH\x10\x02\x12\x0e\n\nFORM_FIELD\x10\x03\x12\x13\n\x0f\x46ORM_FIELD_NAME\x10\x04\x12\x17\n\x13\x46ORM_FIELD_CONTENTS\x10\x05\x12\t\n\x05TABLE\x10\x06\x12\x10\n\x0cTABLE_HEADER\x10\x07\x12\r\n\tTABLE_ROW\x10\x08\x12\x0e\n\nTABLE_CELL\x10\t"F\n\x03Row\x12\x17\n\x0f\x63olumn_spec_ids\x18\x02 \x03(\t\x12&\n\x06values\x18\x03 \x03(\x0b\x32\x16.google.protobuf.Value"\xfe\x01\n\x0e\x45xamplePayload\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32".google.cloud.automl.v1beta1.ImageH\x00\x12@\n\x0ctext_snippet\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetH\x00\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DocumentH\x00\x12/\n\x03row\x18\x03 \x01(\x0b\x32 
.google.cloud.automl.v1beta1.RowH\x00\x42\t\n\x07payloadB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, @@ -54,6 +55,7 @@ full_name="google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="DOCUMENT_DIMENSION_UNIT_UNSPECIFIED", @@ -61,15 +63,31 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INCH", index=1, number=1, serialized_options=None, type=None + name="INCH", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CENTIMETER", index=2, number=2, serialized_options=None, type=None + name="CENTIMETER", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POINT", index=3, number=3, serialized_options=None, type=None + name="POINT", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -84,6 +102,7 @@ full_name="google.cloud.automl.v1beta1.Document.Layout.TextSegmentType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TEXT_SEGMENT_TYPE_UNSPECIFIED", @@ -91,15 +110,31 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TOKEN", index=1, number=1, serialized_options=None, type=None + name="TOKEN", + index=1, + number=1, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARAGRAPH", index=2, number=2, serialized_options=None, type=None + name="PARAGRAPH", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FORM_FIELD", index=3, number=3, serialized_options=None, type=None + name="FORM_FIELD", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="FORM_FIELD_NAME", @@ -107,6 +142,7 @@ number=4, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="FORM_FIELD_CONTENTS", @@ -114,18 +150,39 @@ number=5, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE", index=6, number=6, serialized_options=None, type=None + name="TABLE", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE_HEADER", index=7, number=7, serialized_options=None, type=None + name="TABLE_HEADER", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE_ROW", index=8, number=8, serialized_options=None, type=None + name="TABLE_ROW", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE_CELL", index=9, number=9, serialized_options=None, type=None + name="TABLE_CELL", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -142,6 +199,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="image_bytes", @@ -160,6 +218,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_config", @@ -178,6 +237,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="thumbnail_uri", @@ -196,6 +256,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -211,6 +272,7 @@ full_name="google.cloud.automl.v1beta1.Image.data", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -225,6 +287,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="content", @@ -243,6 +306,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mime_type", @@ -261,6 +325,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="content_uri", @@ -279,6 +344,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -300,6 +366,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="unit", @@ -318,6 +385,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="width", @@ -336,6 +404,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="height", @@ -354,6 +423,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -375,6 +445,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text_segment", @@ -393,6 +464,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_number", @@ -411,6 +483,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_poly", @@ -429,6 +502,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_segment_type", @@ -447,6 +521,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -467,6 +542,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_config", @@ -485,6 +561,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document_text", @@ -503,6 +580,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="layout", @@ -521,6 +599,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document_dimensions", @@ -539,6 +618,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_count", @@ -557,6 +637,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -578,6 +659,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec_ids", @@ -596,6 +678,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="values", @@ -614,6 +697,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -635,6 +719,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="image", @@ -653,6 +738,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_snippet", @@ -671,6 +757,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document", @@ -689,6 +776,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row", @@ -707,6 +795,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -722,6 +811,7 @@ full_name="google.cloud.automl.v1beta1.ExamplePayload.payload", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py index ac493e73..3447a04b 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py @@ -20,6 +20,7 @@ 
package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_stats.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfd\x03\n\tDataStats\x12\x42\n\rfloat64_stats\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.Float64StatsH\x00\x12@\n\x0cstring_stats\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StringStatsH\x00\x12\x46\n\x0ftimestamp_stats\x18\x05 \x01(\x0b\x32+.google.cloud.automl.v1beta1.TimestampStatsH\x00\x12>\n\x0b\x61rray_stats\x18\x06 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ArrayStatsH\x00\x12@\n\x0cstruct_stats\x18\x07 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StructStatsH\x00\x12\x44\n\x0e\x63\x61tegory_stats\x18\x08 \x01(\x0b\x32*.google.cloud.automl.v1beta1.CategoryStatsH\x00\x12\x1c\n\x14\x64istinct_value_count\x18\x01 \x01(\x03\x12\x18\n\x10null_value_count\x18\x02 \x01(\x03\x12\x19\n\x11valid_value_count\x18\t \x01(\x03\x42\x07\n\x05stats"\xdd\x01\n\x0c\x46loat64Stats\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x1a\n\x12standard_deviation\x18\x02 \x01(\x01\x12\x11\n\tquantiles\x18\x03 \x03(\x01\x12T\n\x11histogram_buckets\x18\x04 \x03(\x0b\x32\x39.google.cloud.automl.v1beta1.Float64Stats.HistogramBucket\x1a:\n\x0fHistogramBucket\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\r\n\x05\x63ount\x18\x03 \x01(\x03"\x8d\x01\n\x0bStringStats\x12P\n\x11top_unigram_stats\x18\x01 \x03(\x0b\x32\x35.google.cloud.automl.v1beta1.StringStats.UnigramStats\x1a,\n\x0cUnigramStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"\xf4\x02\n\x0eTimestampStats\x12V\n\x0egranular_stats\x18\x01 
\x03(\x0b\x32>.google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry\x1a\x98\x01\n\rGranularStats\x12W\n\x07\x62uckets\x18\x01 \x03(\x0b\x32\x46.google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry\x1a.\n\x0c\x42ucketsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1ao\n\x12GranularStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12H\n\x05value\x18\x02 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TimestampStats.GranularStats:\x02\x38\x01"J\n\nArrayStats\x12<\n\x0cmember_stats\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats"\xb7\x01\n\x0bStructStats\x12M\n\x0b\x66ield_stats\x18\x01 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.StructStats.FieldStatsEntry\x1aY\n\x0f\x46ieldStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats:\x02\x38\x01"\xa0\x01\n\rCategoryStats\x12Z\n\x12top_category_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats\x1a\x33\n\x13SingleCategoryStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"%\n\x10\x43orrelationStats\x12\x11\n\tcramers_v\x18\x01 \x01(\x01\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -31,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="float64_stats", @@ -49,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="string_stats", @@ -67,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="timestamp_stats", @@ -85,6 +89,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="array_stats", @@ -103,6 +108,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="struct_stats", @@ -121,6 +127,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_stats", @@ -139,6 +146,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="distinct_value_count", @@ -157,6 +165,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="null_value_count", @@ -175,6 +184,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="valid_value_count", @@ -193,6 +203,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -208,6 +219,7 @@ full_name="google.cloud.automl.v1beta1.DataStats.stats", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -222,6 +234,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="min", @@ -240,6 +253,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max", @@ -258,6 +272,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="count", @@ -276,6 +291,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -296,6 +312,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="mean", @@ -314,6 +331,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="standard_deviation", @@ -332,6 +350,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="quantiles", @@ -350,6 +369,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="histogram_buckets", @@ -368,6 +388,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -389,6 +410,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="value", @@ -407,6 +429,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="count", @@ -425,6 +448,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -445,6 +469,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="top_unigram_stats", @@ -463,6 +488,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -484,6 +510,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -502,6 +529,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -520,6 +548,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -540,6 +569,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="buckets", @@ -558,6 +588,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -578,6 +609,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -596,6 +628,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -614,6 +647,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -634,6 +668,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="granular_stats", @@ -652,6 +687,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -673,6 +709,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="member_stats", @@ -691,6 +728,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -712,6 +750,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -730,6 +769,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -748,6 +788,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -768,6 +809,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="field_stats", @@ -786,6 +828,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -807,6 +850,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="value", @@ -825,6 +869,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="count", @@ -843,6 +888,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -863,6 +909,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="top_category_stats", @@ -881,6 +928,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -902,6 +950,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="cramers_v", @@ -920,6 +969,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2.py 
b/google/cloud/automl_v1beta1/proto/data_types_pb2.py index adc5227d..d51d15fe 100644 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_types_pb2.py @@ -21,6 +21,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_types.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfc\x01\n\x08\x44\x61taType\x12\x42\n\x11list_element_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataTypeH\x00\x12>\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.StructTypeH\x00\x12\x15\n\x0btime_format\x18\x05 \x01(\tH\x00\x12\x38\n\ttype_code\x18\x01 \x01(\x0e\x32%.google.cloud.automl.v1beta1.TypeCode\x12\x10\n\x08nullable\x18\x04 \x01(\x08\x42\t\n\x07\x64\x65tails"\xa7\x01\n\nStructType\x12\x43\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x33.google.cloud.automl.v1beta1.StructType.FieldsEntry\x1aT\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType:\x02\x38\x01*r\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0c\n\x08\x43\x41TEGORY\x10\nB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -30,6 +31,7 @@ full_name="google.cloud.automl.v1beta1.TypeCode", filename=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TYPE_CODE_UNSPECIFIED", @@ -37,24 +39,55 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FLOAT64", index=1, number=3, serialized_options=None, type=None + name="FLOAT64", + index=1, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TIMESTAMP", index=2, number=4, serialized_options=None, type=None + name="TIMESTAMP", + index=2, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="STRING", index=3, number=6, serialized_options=None, type=None + name="STRING", + index=3, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ARRAY", index=4, number=8, serialized_options=None, type=None + name="ARRAY", + index=4, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="STRUCT", index=5, number=9, serialized_options=None, type=None + name="STRUCT", + index=5, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CATEGORY", index=6, number=10, serialized_options=None, type=None + name="CATEGORY", + index=6, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -80,6 +113,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="list_element_type", @@ -98,6 +132,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="struct_type", @@ -116,6 +151,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_format", @@ -134,6 +170,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type_code", @@ -152,6 +189,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="nullable", @@ -170,6 +208,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -185,6 +224,7 @@ full_name="google.cloud.automl.v1beta1.DataType.details", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -199,6 +239,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -217,6 +258,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -235,6 +277,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -255,6 +298,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="fields", @@ -273,6 +317,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2.py b/google/cloud/automl_v1beta1/proto/dataset_pb2.py index 0b2de618..fee5459e 100644 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1beta1/proto/dataset_pb2.py 
@@ -37,6 +37,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b"\n/google/cloud/automl_v1beta1/proto/dataset.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto\"\xce\t\n\x07\x44\x61taset\x12_\n\x1ctranslation_dataset_metadata\x18\x17 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TranslationDatasetMetadataH\x00\x12p\n%image_classification_dataset_metadata\x18\x18 \x01(\x0b\x32?.google.cloud.automl.v1beta1.ImageClassificationDatasetMetadataH\x00\x12n\n$text_classification_dataset_metadata\x18\x19 \x01(\x0b\x32>.google.cloud.automl.v1beta1.TextClassificationDatasetMetadataH\x00\x12s\n'image_object_detection_dataset_metadata\x18\x1a \x01(\x0b\x32@.google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadataH\x00\x12p\n%video_classification_dataset_metadata\x18\x1f \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoClassificationDatasetMetadataH\x00\x12q\n&video_object_tracking_dataset_metadata\x18\x1d \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadataH\x00\x12\x66\n text_extraction_dataset_metadata\x18\x1c \x01(\x0b\x32:.google.cloud.automl.v1beta1.TextExtractionDatasetMetadataH\x00\x12\x64\n\x1ftext_sentiment_dataset_metadata\x18\x1e \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TextSentimentDatasetMetadataH\x00\x12U\n\x17tables_dataset_metadata\x18! 
\x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesDatasetMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x15\n\rexample_count\x18\x15 \x01(\x05\x12/\n\x0b\x63reate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04\x65tag\x18\x11 \x01(\t:^\xea\x41[\n\x1d\x61utoml.googleapis.com/Dataset\x12:projects/{project}/locations/{location}/datasets/{dataset}B\x12\n\x10\x64\x61taset_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3", dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -57,6 +58,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translation_dataset_metadata", @@ -75,6 +77,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_classification_dataset_metadata", @@ -93,6 +96,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_classification_dataset_metadata", @@ -111,6 +115,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection_dataset_metadata", @@ -129,6 +134,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_classification_dataset_metadata", @@ -147,6 +153,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="video_object_tracking_dataset_metadata", @@ -165,6 +172,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction_dataset_metadata", @@ -183,6 +191,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment_dataset_metadata", @@ -201,6 +210,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_dataset_metadata", @@ -219,6 +229,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -237,6 +248,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -255,6 +267,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -273,6 +286,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="example_count", @@ -291,6 +305,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -309,6 +324,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="etag", @@ -327,6 +343,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -342,6 +359,7 @@ full_name="google.cloud.automl.v1beta1.Dataset.dataset_metadata", index=0, 
containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2.py b/google/cloud/automl_v1beta1/proto/detection_pb2.py index e64aced2..21f66e5f 100644 --- a/google/cloud/automl_v1beta1/proto/detection_pb2.py +++ b/google/cloud/automl_v1beta1/proto/detection_pb2.py @@ -24,6 +24,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n1google/cloud/automl_v1beta1/proto/detection.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"p\n\x1eImageObjectDetectionAnnotation\x12?\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x02 \x01(\x02"\xb4\x01\n\x1dVideoObjectTrackingAnnotation\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12?\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x04 \x01(\x02"\xae\x02\n\x17\x42oundingBoxMetricsEntry\x12\x15\n\riou_threshold\x18\x01 \x01(\x02\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12o\n\x1a\x63onfidence_metrics_entries\x18\x03 \x03(\x0b\x32K.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02"\xd6\x01\n%ImageObjectDetectionEvaluationMetrics\x12$\n\x1c\x65valuated_bounding_box_count\x18\x01 
\x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x03 \x01(\x02"\xf4\x01\n$VideoObjectTrackingEvaluationMetrics\x12\x1d\n\x15\x65valuated_frame_count\x18\x01 \x01(\x05\x12$\n\x1c\x65valuated_bounding_box_count\x18\x02 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x04 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x06 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, @@ -39,6 +40,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bounding_box", @@ -57,6 +59,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -75,6 +78,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -96,6 +100,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="instance_id", @@ -114,6 +119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -132,6 +138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box", @@ -150,6 +157,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -168,6 +176,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -189,6 +198,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="confidence_threshold", @@ -207,6 +217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -225,6 +236,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision", @@ -243,6 +255,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -261,6 +274,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -281,6 +295,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="iou_threshold", @@ -299,6 +314,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mean_average_precision", @@ -317,6 +333,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence_metrics_entries", @@ -335,6 +352,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -356,6 +374,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="evaluated_bounding_box_count", @@ -374,6 +393,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_metrics_entries", @@ -392,6 +412,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_mean_average_precision", @@ -410,6 +431,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -431,6 +453,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="evaluated_frame_count", @@ -449,6 +472,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="evaluated_bounding_box_count", @@ -467,6 +491,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_metrics_entries", @@ -485,6 +510,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_mean_average_precision", @@ -503,6 +529,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2.py b/google/cloud/automl_v1beta1/proto/geometry_pb2.py index e4164610..6d06a5fa 100644 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1beta1/proto/geometry_pb2.py @@ -20,6 +20,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", 
serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/geometry.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"Z\n\x0c\x42oundingPoly\x12J\n\x13normalized_vertices\x18\x02 \x03(\x0b\x32-.google.cloud.automl.v1beta1.NormalizedVertexB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -31,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="x", @@ -49,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="y", @@ -67,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -88,6 +92,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_vertices", @@ -106,6 +111,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py index 24551f30..fe782edd 100644 --- a/google/cloud/automl_v1beta1/proto/image_pb2.py +++ b/google/cloud/automl_v1beta1/proto/image_pb2.py @@ -28,6 +28,7 @@ 
package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e \x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -45,6 +46,7 @@ filename=None, 
file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_type", @@ -63,6 +65,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -84,6 +87,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -104,6 +108,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="base_model_id", @@ -122,6 +127,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_budget", @@ -140,6 +146,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_cost", @@ -158,6 +165,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stop_reason", @@ -176,6 +184,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_type", @@ -194,6 +203,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_qps", @@ -212,6 +222,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_count", @@ -230,6 +241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -251,6 +263,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_type", @@ -269,6 +282,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_count", @@ -287,6 +301,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_qps", @@ -305,6 +320,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stop_reason", @@ -323,6 +339,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_budget_milli_node_hours", @@ -341,6 +358,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_cost_milli_node_hours", @@ -359,6 +377,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -380,6 +399,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="node_count", @@ -398,6 +418,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -419,6 +440,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="node_count", @@ -437,6 +459,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py index cc4c8305..8cfdba9f 100644 --- 
a/google/cloud/automl_v1beta1/proto/io_pb2.py +++ b/google/cloud/automl_v1beta1/proto/io_pb2.py @@ -20,6 +20,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n*google/cloud/automl_v1beta1/proto/io.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x92\x02\n\x0bInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x12\x44\n\x06params\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.InputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06source"\xa9\x01\n\x17\x42\x61tchPredictInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x42\x08\n\x06source"Q\n\x13\x44ocumentInputConfig\x12:\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSource"\xb7\x01\n\x0cOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xc3\x01\n\x18\x42\x61tchPredictOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xcf\x02\n\x17ModelExportOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 
\x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12\x46\n\x0fgcr_destination\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcrDestinationH\x00\x12\x14\n\x0cmodel_format\x18\x04 \x01(\t\x12P\n\x06params\x18\x02 \x03(\x0b\x32@.google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stination"\x86\x01\n#ExportEvaluatedExamplesOutputConfig\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\x1f\n\tGcsSource\x12\x12\n\ninput_uris\x18\x01 \x03(\t"#\n\x0e\x42igQuerySource\x12\x11\n\tinput_uri\x18\x01 \x01(\t"+\n\x0eGcsDestination\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t")\n\x13\x42igQueryDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\t"$\n\x0eGcrDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -31,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -49,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -67,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -87,6 +91,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_source", @@ -105,6 +110,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_source", @@ -123,6 +129,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="params", @@ -141,6 +148,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -156,6 +164,7 @@ full_name="google.cloud.automl.v1beta1.InputConfig.source", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -170,6 +179,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_source", @@ -188,6 +198,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_source", @@ -206,6 +217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -221,6 +233,7 @@ full_name="google.cloud.automl.v1beta1.BatchPredictInputConfig.source", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -235,6 +248,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_source", @@ -253,6 +267,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -274,6 +289,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_destination", @@ -292,6 +308,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="bigquery_destination", @@ -310,6 +327,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -325,6 +343,7 @@ full_name="google.cloud.automl.v1beta1.OutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -339,6 +358,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_destination", @@ -357,6 +377,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_destination", @@ -375,6 +396,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -390,6 +412,7 @@ full_name="google.cloud.automl.v1beta1.BatchPredictOutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -404,6 +427,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -422,6 +446,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -440,6 +465,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -460,6 +486,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_destination", @@ -478,6 +505,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="gcr_destination", @@ -496,6 +524,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_format", @@ -514,6 +543,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="params", @@ -532,6 +562,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -547,6 +578,7 @@ full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -561,6 +593,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bigquery_destination", @@ -579,6 +612,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -594,6 +628,7 @@ full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -608,6 +643,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uris", @@ -626,6 +662,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -647,6 +684,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -665,6 +703,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -686,6 +725,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="output_uri_prefix", @@ -704,6 +744,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -725,6 +766,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_uri", @@ -743,6 +785,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -764,6 +807,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_uri", @@ -782,6 +826,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py index 4152dd72..f987190b 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py @@ -43,6 +43,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\n8google/cloud/automl_v1beta1/proto/model_evaluation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb1\x08\n\x0fModelEvaluation\x12i\n!classification_evaluation_metrics\x18\x08 \x01(\x0b\x32<.google.cloud.automl.v1beta1.ClassificationEvaluationMetricsH\x00\x12\x61\n\x1dregression_evaluation_metrics\x18\x18 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.RegressionEvaluationMetricsH\x00\x12\x63\n\x1etranslation_evaluation_metrics\x18\t \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TranslationEvaluationMetricsH\x00\x12w\n)image_object_detection_evaluation_metrics\x18\x0c \x01(\x0b\x32\x42.google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetricsH\x00\x12u\n(video_object_tracking_evaluation_metrics\x18\x0e \x01(\x0b\x32\x41.google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetricsH\x00\x12h\n!text_sentiment_evaluation_metrics\x18\x0b \x01(\x0b\x32;.google.cloud.automl.v1beta1.TextSentimentEvaluationMetricsH\x00\x12j\n"text_extraction_evaluation_metrics\x18\r \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextExtractionEvaluationMetricsH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\x12\x61nnotation_spec_id\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0f \x01(\t\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x17\x65valuated_example_count\x18\x06 
\x01(\x05:\x87\x01\xea\x41\x83\x01\n%automl.googleapis.com/ModelEvaluation\x12Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}B\t\n\x07metricsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -65,6 +66,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_evaluation_metrics", @@ -83,6 +85,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="regression_evaluation_metrics", @@ -101,6 +104,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="translation_evaluation_metrics", @@ -119,6 +123,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection_evaluation_metrics", @@ -137,6 +142,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_object_tracking_evaluation_metrics", @@ -155,6 +161,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment_evaluation_metrics", @@ -173,6 +180,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction_evaluation_metrics", @@ -191,6 +199,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -209,6 +218,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -227,6 +237,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -245,6 +256,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -263,6 +275,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="evaluated_example_count", @@ -281,6 +294,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -296,6 +310,7 @@ full_name="google.cloud.automl.v1beta1.ModelEvaluation.metrics", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], diff --git a/google/cloud/automl_v1beta1/proto/model_pb2.py b/google/cloud/automl_v1beta1/proto/model_pb2.py index d30bd8dd..2f0e369a 100644 --- a/google/cloud/automl_v1beta1/proto/model_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_pb2.py @@ -37,6 +37,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/model.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xcc\n\n\x05Model\x12[\n\x1atranslation_model_metadata\x18\x0f \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TranslationModelMetadataH\x00\x12l\n#image_classification_model_metadata\x18\r \x01(\x0b\x32=.google.cloud.automl.v1beta1.ImageClassificationModelMetadataH\x00\x12j\n"text_classification_model_metadata\x18\x0e \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextClassificationModelMetadataH\x00\x12o\n%image_object_detection_model_metadata\x18\x14 \x01(\x0b\x32>.google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadataH\x00\x12l\n#video_classification_model_metadata\x18\x17 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoClassificationModelMetadataH\x00\x12m\n$video_object_tracking_model_metadata\x18\x15 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadataH\x00\x12\x62\n\x1etext_extraction_model_metadata\x18\x13 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.TextExtractionModelMetadataH\x00\x12Q\n\x15tables_model_metadata\x18\x18 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.TablesModelMetadataH\x00\x12`\n\x1dtext_sentiment_model_metadata\x18\x16 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TextSentimentModelMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x12\n\ndataset_id\x18\x03 \x01(\t\x12/\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x10\x64\x65ployment_state\x18\x08 
\x01(\x0e\x32\x32.google.cloud.automl.v1beta1.Model.DeploymentState"Q\n\x0f\x44\x65ploymentState\x12 \n\x1c\x44\x45PLOYMENT_STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x44\x45PLOYED\x10\x01\x12\x0e\n\nUNDEPLOYED\x10\x02:X\xea\x41U\n\x1b\x61utoml.googleapis.com/Model\x12\x36projects/{project}/locations/{location}/models/{model}B\x10\n\x0emodel_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -56,6 +57,7 @@ full_name="google.cloud.automl.v1beta1.Model.DeploymentState", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="DEPLOYMENT_STATE_UNSPECIFIED", @@ -63,12 +65,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DEPLOYED", index=1, number=1, serialized_options=None, type=None + name="DEPLOYED", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNDEPLOYED", index=2, number=2, serialized_options=None, type=None + name="UNDEPLOYED", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -85,6 +98,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translation_model_metadata", @@ -103,6 +117,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_classification_model_metadata", @@ -121,6 +136,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_classification_model_metadata", @@ -139,6 +155,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection_model_metadata", @@ -157,6 +174,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_classification_model_metadata", @@ -175,6 +193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_object_tracking_model_metadata", @@ -193,6 +212,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction_model_metadata", @@ -211,6 +231,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_model_metadata", @@ -229,6 +250,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment_model_metadata", @@ -247,6 +269,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -265,6 +288,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -283,6 +307,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="dataset_id", @@ -301,6 +326,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -319,6 +345,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -337,6 +364,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="deployment_state", @@ -355,6 +383,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -370,6 +399,7 @@ full_name="google.cloud.automl.v1beta1.Model.model_metadata", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py index 212b1aaa..ee32f2aa 100644 --- a/google/cloud/automl_v1beta1/proto/operations_pb2.py +++ b/google/cloud/automl_v1beta1/proto/operations_pb2.py @@ -32,6 +32,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 
\x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 \x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 
\x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, @@ -51,6 +52,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="delete_details", @@ -69,6 +71,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="deploy_model_details", @@ -87,6 +90,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="undeploy_model_details", @@ -105,6 +109,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_model_details", @@ -123,6 +128,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="import_data_details", @@ -141,6 +147,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="batch_predict_details", @@ -159,6 +166,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="export_data_details", @@ -177,6 +185,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="export_model_details", @@ -195,6 +204,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="export_evaluated_examples_details", @@ -213,6 +223,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -231,6 +242,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="partial_failures", @@ -249,6 +261,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -267,6 +280,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -285,6 +299,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -300,6 +315,7 @@ full_name="google.cloud.automl.v1beta1.OperationMetadata.details", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -314,6 +330,7 @@ filename=None, 
file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -334,6 +351,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -354,6 +372,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -374,6 +393,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -394,6 +414,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -414,6 +435,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_output_directory", @@ -432,6 +454,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_output_dataset", @@ -450,6 +473,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -465,6 +489,7 @@ full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo.output_location", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -478,6 +503,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_info", @@ -496,6 +522,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -517,6 +544,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, 
fields=[ _descriptor.FieldDescriptor( name="gcs_output_directory", @@ -535,6 +563,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_output_dataset", @@ -553,6 +582,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -568,6 +598,7 @@ full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo.output_location", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -581,6 +612,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_config", @@ -599,6 +631,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_info", @@ -617,6 +650,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -638,6 +672,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_output_directory", @@ -656,6 +691,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -676,6 +712,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_info", @@ -694,6 +731,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -715,6 +753,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="bigquery_output_dataset", @@ -733,6 +772,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -753,6 +793,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_info", @@ -771,6 +812,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py index 56ac149c..248338d5 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py @@ -38,6 +38,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\026PredictionServiceProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n:google/cloud/automl_v1beta1/proto/prediction_service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a#google/longrunning/operations.proto"\xfe\x01\n\x0ePredictRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x41\n\x07payload\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayloadB\x03\xe0\x41\x02\x12G\n\x06params\x18\x03 
\x03(\x0b\x32\x37.google.cloud.automl.v1beta1.PredictRequest.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x02\n\x0fPredictResponse\x12?\n\x07payload\x18\x01 \x03(\x0b\x32..google.cloud.automl.v1beta1.AnnotationPayload\x12G\n\x12preprocessed_input\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayload\x12L\n\x08metadata\x18\x02 \x03(\x0b\x32:.google.cloud.automl.v1beta1.PredictResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xee\x02\n\x13\x42\x61tchPredictRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12O\n\x0cinput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfigB\x03\xe0\x41\x02\x12Q\n\routput_config\x18\x04 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.BatchPredictOutputConfigB\x03\xe0\x41\x02\x12Q\n\x06params\x18\x05 \x03(\x0b\x32<.google.cloud.automl.v1beta1.BatchPredictRequest.ParamsEntryB\x03\xe0\x41\x02\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x96\x01\n\x12\x42\x61tchPredictResult\x12O\n\x08metadata\x18\x01 \x03(\x0b\x32=.google.cloud.automl.v1beta1.BatchPredictResult.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x32\x9e\x04\n\x11PredictionService\x12\xbe\x01\n\x07Predict\x12+.google.cloud.automl.v1beta1.PredictRequest\x1a,.google.cloud.automl.v1beta1.PredictResponse"X\x82\xd3\xe4\x93\x02<"7/v1beta1/{name=projects/*/locations/*/models/*}:predict:\x01*\xda\x41\x13name,payload,params\x12\xfc\x01\n\x0c\x42\x61tchPredict\x12\x30.google.cloud.automl.v1beta1.BatchPredictRequest\x1a\x1d.google.longrunning.Operation"\x9a\x01\x82\xd3\xe4\x93\x02\x41"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\xda\x41\x04name\x12\xca\x01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"O\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\xda\x41\x06parent\x12\xda\x01\n\x0fUpdateTableSpec\x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"j\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\xda\x41\ntable_spec\x12\xc8\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"[\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\xda\x41\x04name\x12\xdb\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"]\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\xda\x41\x06parent\x12\xee\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"{\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\xda\x41\x0b\x63olumn_spec\x12\xc9\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operatio
n"j\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x9c\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model">\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xaf\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xca\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x31*//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd4\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"u\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xda\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"w\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe3\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\x8c\x02\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"\x94\x01\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\x
da\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xcd\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"Q\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xe0\x01\n\x14ListModelEvaluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"S\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\x06parent\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -85,6 +86,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -103,6 +105,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="dataset", @@ -121,6 +124,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -142,6 +146,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -160,6 +165,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -181,6 +187,7 @@ 
filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -199,6 +206,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -217,6 +225,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -235,6 +244,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -253,6 +263,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -274,6 +285,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="datasets", @@ -292,6 +304,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -310,6 +323,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -331,6 +345,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="dataset", @@ -349,6 +364,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -367,6 +383,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -388,6 +405,7 @@ filename=None, file=DESCRIPTOR, 
containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -406,6 +424,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -427,6 +446,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -445,6 +465,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_config", @@ -463,6 +484,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -484,6 +506,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -502,6 +525,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_config", @@ -520,6 +544,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -541,6 +566,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -559,6 +585,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A&\n$automl.googleapis.com/AnnotationSpec", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -580,6 +607,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="name", @@ -598,6 +626,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -616,6 +645,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -637,6 +667,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -655,6 +686,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -673,6 +705,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -691,6 +724,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -709,6 +743,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -727,6 +762,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -748,6 +784,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_specs", @@ -766,6 +803,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -784,6 +822,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
], extensions=[], @@ -805,6 +844,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_spec", @@ -823,6 +863,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -841,6 +882,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -862,6 +904,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -880,6 +923,7 @@ extension_scope=None, serialized_options=b'\340A\002\372A"\n automl.googleapis.com/ColumnSpec', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -898,6 +942,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -919,6 +964,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -937,6 +983,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -955,6 +1002,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -973,6 +1021,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -991,6 +1040,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="page_token", @@ -1009,6 +1059,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1030,6 +1081,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_specs", @@ -1048,6 +1100,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1066,6 +1119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1087,6 +1141,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec", @@ -1105,6 +1160,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -1123,6 +1179,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1144,6 +1201,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1162,6 +1220,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -1180,6 +1239,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1201,6 +1261,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ 
-1219,6 +1280,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1240,6 +1302,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1258,6 +1321,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -1276,6 +1340,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -1294,6 +1359,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1312,6 +1378,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1333,6 +1400,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -1351,6 +1419,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1369,6 +1438,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1390,6 +1460,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1408,6 +1479,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1429,6 +1501,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="image_object_detection_model_deployment_metadata", @@ -1447,6 +1520,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_classification_model_deployment_metadata", @@ -1465,6 +1539,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -1483,6 +1558,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1498,6 +1574,7 @@ full_name="google.cloud.automl.v1beta1.DeployModelRequest.model_deployment_metadata", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -1512,6 +1589,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1530,6 +1608,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1551,6 +1630,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1569,6 +1649,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_config", @@ -1587,6 +1668,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1608,6 +1690,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1626,6 +1709,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_config", @@ -1644,6 +1728,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1665,6 +1750,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1683,6 +1769,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A'\n%automl.googleapis.com/ModelEvaluation", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1704,6 +1791,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1722,6 +1810,7 @@ extension_scope=None, serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -1740,6 +1829,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -1758,6 +1848,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1776,6 +1867,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1797,6 +1889,7 @@ filename=None, 
file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_evaluation", @@ -1815,6 +1908,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1833,6 +1927,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2673,6 +2768,7 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=4628, serialized_end=9729, methods=[ @@ -2684,6 +2780,7 @@ input_type=_CREATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, serialized_options=b'\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetDataset", @@ -2693,6 +2790,7 @@ input_type=_GETDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListDatasets", @@ -2702,6 +2800,7 @@ input_type=_LISTDATASETSREQUEST, output_type=_LISTDATASETSRESPONSE, serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateDataset", @@ -2711,6 +2810,7 @@ input_type=_UPDATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, 
serialized_options=b"\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\007dataset", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteDataset", @@ -2720,6 +2820,7 @@ input_type=_DELETEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b"\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ImportData", @@ -2729,6 +2830,7 @@ input_type=_IMPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002A"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListTableSpecs", @@ -2765,6 +2870,7 @@ input_type=_LISTTABLESPECSREQUEST, output_type=_LISTTABLESPECSRESPONSE, serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateTableSpec", @@ -2774,6 +2880,7 @@ input_type=_UPDATETABLESPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC, serialized_options=b"\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\332A\ntable_spec", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetColumnSpec", @@ -2783,6 +2890,7 @@ input_type=_GETCOLUMNSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\332A\004name", + 
create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListColumnSpecs", @@ -2792,6 +2900,7 @@ input_type=_LISTCOLUMNSPECSREQUEST, output_type=_LISTCOLUMNSPECSRESPONSE, serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateColumnSpec", @@ -2801,6 +2910,7 @@ input_type=_UPDATECOLUMNSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, serialized_options=b"\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec\332A\013column_spec", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateModel", @@ -2810,6 +2920,7 @@ input_type=_CREATEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModel", @@ -2819,6 +2930,7 @@ input_type=_GETMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL, serialized_options=b"\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModels", @@ -2828,6 +2940,7 @@ input_type=_LISTMODELSREQUEST, output_type=_LISTMODELSRESPONSE, serialized_options=b"\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteModel", @@ -2837,6 +2950,7 @@ input_type=_DELETEMODELREQUEST, 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b"\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeployModel", @@ -2846,6 +2960,7 @@ input_type=_DEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UndeployModel", @@ -2855,6 +2970,7 @@ input_type=_UNDEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportModel", @@ -2864,6 +2980,7 @@ input_type=_EXPORTMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportEvaluatedExamples", @@ -2873,6 +2990,7 @@ input_type=_EXPORTEVALUATEDEXAMPLESREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( 
name="GetModelEvaluation", @@ -2882,6 +3000,7 @@ input_type=_GETMODELEVALUATIONREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModelEvaluations", @@ -2891,6 +3010,7 @@ input_type=_LISTMODELEVALUATIONSREQUEST, output_type=_LISTMODELEVALUATIONSRESPONSE, serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\006parent", + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py index 9c8f872e..66d30724 100644 --- a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py @@ -24,6 +24,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/table_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1cgoogle/api/annotations.proto"\xc1\x02\n\tTableSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x13time_column_spec_id\x18\x02 \x01(\t\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x17\n\x0fvalid_row_count\x18\x04 \x01(\x03\x12\x14\n\x0c\x63olumn_count\x18\x07 \x01(\x03\x12?\n\rinput_configs\x18\x05 \x03(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig\x12\x0c\n\x04\x65tag\x18\x06 
\x01(\t:x\xea\x41u\n\x1f\x61utoml.googleapis.com/TableSpec\x12Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -39,6 +40,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -57,6 +59,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_column_spec_id", @@ -75,6 +78,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_count", @@ -93,6 +97,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="valid_row_count", @@ -111,6 +116,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_count", @@ -129,6 +135,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_configs", @@ -147,6 +154,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="etag", @@ -165,6 +173,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py index 0be54c2a..364ed1b8 100644 --- 
a/google/cloud/automl_v1beta1/proto/tables_pb2.py +++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py @@ -43,6 +43,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 
\x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xfd\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12\x16\n\x0e\x62\x61seline_score\x18\x05 \x01(\x02"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, @@ -65,6 +66,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -83,6 +85,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -101,6 +104,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -121,6 +125,7 @@ filename=None, file=DESCRIPTOR, 
containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="primary_table_spec_id", @@ -139,6 +144,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_column_spec_id", @@ -157,6 +163,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="weight_column_spec_id", @@ -175,6 +182,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="ml_use_column_spec_id", @@ -193,6 +201,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_column_correlations", @@ -211,6 +220,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stats_update_time", @@ -229,6 +239,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -250,6 +261,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="optimization_objective_recall_value", @@ -268,6 +280,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="optimization_objective_precision_value", @@ -286,6 +299,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_column_spec", @@ -304,6 +318,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="input_feature_column_specs", @@ -322,6 +337,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="optimization_objective", @@ -340,6 +356,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_model_column_info", @@ -358,6 +375,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_budget_milli_node_hours", @@ -376,6 +394,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_cost_milli_node_hours", @@ -394,6 +413,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="disable_early_stopping", @@ -412,6 +432,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -427,6 +448,7 @@ full_name="google.cloud.automl.v1beta1.TablesModelMetadata.additional_optimization_objective_config", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -441,6 +463,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="score", @@ -459,6 +482,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="prediction_interval", @@ -477,6 +501,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -495,6 +520,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_model_column_info", @@ -513,6 +539,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="baseline_score", @@ -531,6 +558,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -552,6 +580,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec_name", @@ -570,6 +599,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_display_name", @@ -588,6 +618,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="feature_importance", @@ -606,6 +637,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2.py b/google/cloud/automl_v1beta1/proto/temporal_pb2.py index 80e8359d..20b9f802 100644 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2.py +++ b/google/cloud/automl_v1beta1/proto/temporal_pb2.py @@ -21,6 +21,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/temporal.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"w\n\x0bTimeSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, @@ -35,6 +36,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time_offset", @@ -53,6 +55,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ -71,6 +74,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py index 7e7f80f1..e3efded5 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py @@ -23,6 +23,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 \x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, @@ -37,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text_segment", @@ -55,6 +57,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -73,6 +76,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -88,6 +92,7 @@ full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.annotation", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -102,6 +107,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="confidence_threshold", @@ -120,6 +126,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -138,6 +145,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision", @@ -156,6 +164,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -174,6 +183,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -194,6 +204,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="au_prc", @@ -212,6 +223,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence_metrics_entries", @@ -230,6 +242,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py index 08d48292..3b36d894 100644 --- a/google/cloud/automl_v1beta1/proto/text_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_pb2.py @@ -23,6 +23,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, @@ -37,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_type", @@ -55,6 +57,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -76,6 +79,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_type", @@ -94,6 +98,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -115,6 +120,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -135,6 +141,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], 
@@ -155,6 +162,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentiment_max", @@ -173,6 +181,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -194,6 +203,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py index 150c0245..4327112e 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py @@ -20,6 +20,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TextSegmentProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n4google/cloud/automl_v1beta1/proto/text_segment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"H\n\x0bTextSegment\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x12\n\nend_offset\x18\x02 \x01(\x03\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TextSegmentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -31,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="content", @@ -49,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_offset", @@ -67,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_offset", @@ -85,6 +89,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py index 737523cd..c1e80777 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py @@ -23,6 +23,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\022TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/text_sentiment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto",\n\x17TextSentimentAnnotation\x12\x11\n\tsentiment\x18\x01 \x01(\x05"\xc5\x02\n\x1eTextSentimentEvaluationMetrics\x12\x11\n\tprecision\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x03 \x01(\x02\x12\x1b\n\x13mean_absolute_error\x18\x04 \x01(\x02\x12\x1a\n\x12mean_squared_error\x18\x05 \x01(\x02\x12\x14\n\x0clinear_kappa\x18\x06 \x01(\x02\x12\x17\n\x0fquadratic_kappa\x18\x07 \x01(\x02\x12\x66\n\x10\x63onfusion_matrix\x18\x08 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1e\n\x12\x61nnotation_spec_id\x18\t 
\x03(\tB\x02\x18\x01\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x12TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, @@ -37,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentiment", @@ -55,6 +57,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -76,6 +79,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="precision", @@ -94,6 +98,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -112,6 +117,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -130,6 +136,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mean_absolute_error", @@ -148,6 +155,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mean_squared_error", @@ -166,6 +174,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="linear_kappa", @@ -184,6 +193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="quadratic_kappa", @@ -202,6 +212,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confusion_matrix", @@ -220,6 +231,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -238,6 +250,7 @@ extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2.py b/google/cloud/automl_v1beta1/proto/translation_pb2.py index 411a25bb..bc17be61 100644 --- a/google/cloud/automl_v1beta1/proto/translation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/translation_pb2.py @@ -24,6 +24,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TranslationProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/translation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x1cgoogle/api/annotations.proto"b\n\x1aTranslationDatasetMetadata\x12!\n\x14source_language_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12!\n\x14target_language_code\x18\x02 \x01(\tB\x03\xe0\x41\x02"K\n\x1cTranslationEvaluationMetrics\x12\x12\n\nbleu_score\x18\x01 \x01(\x01\x12\x17\n\x0f\x62\x61se_bleu_score\x18\x02 \x01(\x01"j\n\x18TranslationModelMetadata\x12\x12\n\nbase_model\x18\x01 \x01(\t\x12\x1c\n\x14source_language_code\x18\x02 \x01(\t\x12\x1c\n\x14target_language_code\x18\x03 \x01(\t"]\n\x15TranslationAnnotation\x12\x44\n\x12translated_content\x18\x01 
\x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetB\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TranslationProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, @@ -39,6 +40,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="source_language_code", @@ -57,6 +59,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_language_code", @@ -75,6 +78,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -96,6 +100,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bleu_score", @@ -114,6 +119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="base_bleu_score", @@ -132,6 +138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -153,6 +160,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="base_model", @@ -171,6 +179,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="source_language_code", @@ -189,6 +198,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_language_code", @@ -207,6 +217,7 
@@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -228,6 +239,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translated_content", @@ -246,6 +258,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], diff --git a/google/cloud/automl_v1beta1/proto/video_pb2.py b/google/cloud/automl_v1beta1/proto/video_pb2.py index e1903ba3..b870cb4c 100644 --- a/google/cloud/automl_v1beta1/proto/video_pb2.py +++ b/google/cloud/automl_v1beta1/proto/video_pb2.py @@ -23,6 +23,7 @@ package="google.cloud.automl.v1beta1", syntax="proto3", serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, @@ -37,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -57,6 +59,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -77,6 +80,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -97,6 +101,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], diff --git a/synth.metadata b/synth.metadata index 72f56270..3fce263a 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "dec3204175104cef49bf21d685d5517caaf0058f", - "internalRef": "312689208" + "sha": "c4e37010d74071851ff24121f522e802231ac86e", + "internalRef": "313460921" } }, { From 357ae0ec202e47f64259c9d3831fd4023bacabb4 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:48:23 -0700 Subject: [PATCH 4/9] chore: update gapic-generator and go microgen changes include: - build_gen: go lro gapic used as dep - go_gapic_library: fixes shading of go_library importpath PiperOrigin-RevId: 314363155 Source-Author: Google APIs Source-Date: Tue Jun 2 10:56:09 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 3a4894c4f0da3e763aca2c67bd280ae915177450 Source-Link: https://github.com/googleapis/googleapis/commit/3a4894c4f0da3e763aca2c67bd280ae915177450 --- google/cloud/automl_v1/__init__.py | 4 ++-- google/cloud/automl_v1beta1/__init__.py | 4 ++-- synth.metadata | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/google/cloud/automl_v1/__init__.py b/google/cloud/automl_v1/__init__.py index 361bad1e..3c9ade66 100644 --- a/google/cloud/automl_v1/__init__.py +++ b/google/cloud/automl_v1/__init__.py @@ -27,8 +27,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." 
- "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/google/cloud/automl_v1beta1/__init__.py b/google/cloud/automl_v1beta1/__init__.py index 846ef59a..8fcf0590 100644 --- a/google/cloud/automl_v1beta1/__init__.py +++ b/google/cloud/automl_v1beta1/__init__.py @@ -37,8 +37,8 @@ class GcsClient(gcs_client.GcsClient): if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/synth.metadata b/synth.metadata index 3fce263a..9d23d01f 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c4e37010d74071851ff24121f522e802231ac86e", - "internalRef": "313460921" + "sha": "3a4894c4f0da3e763aca2c67bd280ae915177450", + "internalRef": "314363155" } }, { From 004959aa2034f62645520ca2d771c9d04388c4fc Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:51:42 -0700 Subject: [PATCH 5/9] fix: use protoc-docs-plugin 0.8.0 Fixes issue with missing newline before 'Attributes' in Python docstrings. 
PiperOrigin-RevId: 316182409 Source-Author: Google APIs Source-Date: Fri Jun 12 14:52:11 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 184661793fbe3b89f2b485c303e7466cef9d21a1 Source-Link: https://github.com/googleapis/googleapis/commit/184661793fbe3b89f2b485c303e7466cef9d21a1 --- .../automl_v1/proto/annotation_payload_pb2.py | 1 + .../automl_v1/proto/annotation_spec_pb2.py | 1 + .../automl_v1/proto/classification_pb2.py | 5 ++++ .../cloud/automl_v1/proto/data_items_pb2.py | 6 ++++ google/cloud/automl_v1/proto/dataset_pb2.py | 1 + google/cloud/automl_v1/proto/detection_pb2.py | 4 +++ google/cloud/automl_v1/proto/geometry_pb2.py | 2 ++ google/cloud/automl_v1/proto/image_pb2.py | 5 ++++ google/cloud/automl_v1/proto/io_pb2.py | 8 +++++ .../automl_v1/proto/model_evaluation_pb2.py | 1 + google/cloud/automl_v1/proto/model_pb2.py | 1 + .../cloud/automl_v1/proto/operations_pb2.py | 7 +++++ .../automl_v1/proto/prediction_service_pb2.py | 4 +++ google/cloud/automl_v1/proto/service_pb2.py | 21 ++++++++++++++ .../automl_v1/proto/text_extraction_pb2.py | 3 ++ google/cloud/automl_v1/proto/text_pb2.py | 3 ++ .../cloud/automl_v1/proto/text_segment_pb2.py | 1 + .../automl_v1/proto/text_sentiment_pb2.py | 2 ++ .../cloud/automl_v1/proto/translation_pb2.py | 4 +++ .../proto/annotation_payload_pb2.py | 1 + .../proto/annotation_spec_pb2.py | 1 + .../proto/classification_pb2.py | 6 ++++ .../automl_v1beta1/proto/column_spec_pb2.py | 2 ++ .../automl_v1beta1/proto/data_items_pb2.py | 7 +++++ .../automl_v1beta1/proto/data_stats_pb2.py | 12 ++++++++ .../automl_v1beta1/proto/data_types_pb2.py | 2 ++ .../cloud/automl_v1beta1/proto/dataset_pb2.py | 1 + .../automl_v1beta1/proto/detection_pb2.py | 6 ++++ .../automl_v1beta1/proto/geometry_pb2.py | 2 ++ .../cloud/automl_v1beta1/proto/image_pb2.py | 5 ++++ google/cloud/automl_v1beta1/proto/io_pb2.py | 12 ++++++++ .../proto/model_evaluation_pb2.py | 1 + .../cloud/automl_v1beta1/proto/model_pb2.py | 1 + 
.../automl_v1beta1/proto/operations_pb2.py | 9 ++++++ .../proto/prediction_service_pb2.py | 4 +++ .../cloud/automl_v1beta1/proto/ranges_pb2.py | 1 + .../automl_v1beta1/proto/regression_pb2.py | 1 + .../cloud/automl_v1beta1/proto/service_pb2.py | 29 +++++++++++++++++++ .../automl_v1beta1/proto/table_spec_pb2.py | 1 + .../cloud/automl_v1beta1/proto/tables_pb2.py | 4 +++ .../automl_v1beta1/proto/temporal_pb2.py | 1 + .../proto/text_extraction_pb2.py | 3 ++ google/cloud/automl_v1beta1/proto/text_pb2.py | 3 ++ .../automl_v1beta1/proto/text_segment_pb2.py | 1 + .../proto/text_sentiment_pb2.py | 2 ++ .../automl_v1beta1/proto/translation_pb2.py | 4 +++ synth.metadata | 4 +-- 47 files changed, 204 insertions(+), 2 deletions(-) diff --git a/google/cloud/automl_v1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1/proto/annotation_payload_pb2.py index c0ad5894..8cdfd04f 100644 --- a/google/cloud/automl_v1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1/proto/annotation_payload_pb2.py @@ -278,6 +278,7 @@ "DESCRIPTOR": _ANNOTATIONPAYLOAD, "__module__": "google.cloud.automl_v1.proto.annotation_payload_pb2", "__doc__": """Contains annotation information that is relevant to AutoML. + Attributes: detail: Output only . Additional information about the annotation diff --git a/google/cloud/automl_v1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1/proto/annotation_spec_pb2.py index b5bcf737..828b2e9c 100644 --- a/google/cloud/automl_v1/proto/annotation_spec_pb2.py +++ b/google/cloud/automl_v1/proto/annotation_spec_pb2.py @@ -118,6 +118,7 @@ "DESCRIPTOR": _ANNOTATIONSPEC, "__module__": "google.cloud.automl_v1.proto.annotation_spec_pb2", "__doc__": """A definition of an annotation spec. + Attributes: name: Output only. Resource name of the annotation spec. 
Form: ‘pro diff --git a/google/cloud/automl_v1/proto/classification_pb2.py b/google/cloud/automl_v1/proto/classification_pb2.py index 118c79bb..5d23efdd 100644 --- a/google/cloud/automl_v1/proto/classification_pb2.py +++ b/google/cloud/automl_v1/proto/classification_pb2.py @@ -687,6 +687,7 @@ "DESCRIPTOR": _CLASSIFICATIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Contains annotation details specific to classification. + Attributes: score: Output only. A confidence estimate between 0.0 and 1.0. A @@ -711,6 +712,7 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Metrics for a single confidence threshold. + Attributes: confidence_threshold: Output only. Metrics are computed with an assumption that the @@ -777,6 +779,7 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Output only. A row in the confusion matrix. + Attributes: example_count: Output only. Value of the specific cell in the confusion @@ -792,6 +795,7 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, "__module__": "google.cloud.automl_v1.proto.classification_pb2", "__doc__": """Confusion matrix of the model running the classification. + Attributes: annotation_spec_id: Output only. IDs of the annotation specs used in the confusion @@ -821,6 +825,7 @@ "__doc__": """Model evaluation metrics for classification problems. Note: For Video Classification this metrics only describe quality of the Video Classification predictions of “segment_classification” type. + Attributes: au_prc: Output only. The Area Under Precision-Recall Curve metric. 
diff --git a/google/cloud/automl_v1/proto/data_items_pb2.py b/google/cloud/automl_v1/proto/data_items_pb2.py index e58ae99d..3cb6dd33 100644 --- a/google/cloud/automl_v1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1/proto/data_items_pb2.py @@ -782,6 +782,7 @@ "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """A representation of an image. Only images up to 30MB in size are supported. + Attributes: data: Input only. The data representing the image. For Predict calls @@ -806,6 +807,7 @@ "DESCRIPTOR": _TEXTSNIPPET, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """A representation of a text snippet. + Attributes: content: Required. The content of the text snippet as a string. Up to @@ -832,6 +834,7 @@ "DESCRIPTOR": _DOCUMENTDIMENSIONS, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """Message that describes dimension of a document. + Attributes: unit: Unit of the dimension. @@ -858,6 +861,7 @@ "__doc__": """Describes the layout information of a [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the document. + Attributes: text_segment: Text Segment that represents a segment in [document_text][goog @@ -885,6 +889,7 @@ "DESCRIPTOR": _DOCUMENT, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """A structured text document e.g. a PDF. + Attributes: input_config: An input config specifying the content of the document. @@ -911,6 +916,7 @@ "DESCRIPTOR": _EXAMPLEPAYLOAD, "__module__": "google.cloud.automl_v1.proto.data_items_pb2", "__doc__": """Example data used for training or prediction. + Attributes: payload: Required. The example data. 
diff --git a/google/cloud/automl_v1/proto/dataset_pb2.py b/google/cloud/automl_v1/proto/dataset_pb2.py index c969bcb1..4949f03e 100644 --- a/google/cloud/automl_v1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1/proto/dataset_pb2.py @@ -471,6 +471,7 @@ "__module__": "google.cloud.automl_v1.proto.dataset_pb2", "__doc__": """A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples that may be annotated. + Attributes: dataset_metadata: Required. The dataset metadata that is specific to the problem diff --git a/google/cloud/automl_v1/proto/detection_pb2.py b/google/cloud/automl_v1/proto/detection_pb2.py index e10bace6..9df67fd3 100644 --- a/google/cloud/automl_v1/proto/detection_pb2.py +++ b/google/cloud/automl_v1/proto/detection_pb2.py @@ -378,6 +378,7 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.detection_pb2", "__doc__": """Annotation details for image object detection. + Attributes: bounding_box: Output only. The rectangle representing the object location. @@ -402,6 +403,7 @@ "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.detection_pb2", "__doc__": """Metrics for a single confidence threshold. + Attributes: confidence_threshold: Output only. The confidence threshold value used to compute @@ -420,6 +422,7 @@ "__module__": "google.cloud.automl_v1.proto.detection_pb2", "__doc__": """Bounding box matching model metrics for a single intersection-over- union threshold and multiple label match confidence thresholds. + Attributes: iou_threshold: Output only. The intersection-over-union threshold value used @@ -446,6 +449,7 @@ "__module__": "google.cloud.automl_v1.proto.detection_pb2", "__doc__": """Model evaluation metrics for image object detection problems. Evaluates prediction quality of labeled bounding boxes. + Attributes: evaluated_bounding_box_count: Output only. The total number of bounding boxes (i.e. 
summed diff --git a/google/cloud/automl_v1/proto/geometry_pb2.py b/google/cloud/automl_v1/proto/geometry_pb2.py index bcc1597a..6be5d28b 100644 --- a/google/cloud/automl_v1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1/proto/geometry_pb2.py @@ -138,6 +138,7 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.automl_v1.proto.geometry_pb2", "__doc__": """Required. Horizontal coordinate. + Attributes: y: Required. Vertical coordinate. @@ -156,6 +157,7 @@ "__doc__": """A bounding polygon of a detected object on a plane. On output both vertices and normalized_vertices are provided. The polygon is formed by connecting vertices in the order they are listed. + Attributes: normalized_vertices: Output only . The bounding polygon normalized vertices. diff --git a/google/cloud/automl_v1/proto/image_pb2.py b/google/cloud/automl_v1/proto/image_pb2.py index 560972f8..fd2e4c26 100644 --- a/google/cloud/automl_v1/proto/image_pb2.py +++ b/google/cloud/automl_v1/proto/image_pb2.py @@ -506,6 +506,7 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Dataset metadata that is specific to image classification. + Attributes: classification_type: Required. Type of the classification problem. @@ -534,6 +535,7 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Model metadata for image classification. + Attributes: base_model_id: Optional. The ID of the ``base`` model. If it is specified, @@ -622,6 +624,7 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Model metadata specific to image object detection. + Attributes: model_type: Optional. Type of the model. 
The available values are: \* @@ -693,6 +696,7 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Model deployment metadata specific to Image Classification. + Attributes: node_count: Input only. The number of nodes to deploy the model on. A node @@ -713,6 +717,7 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1.proto.image_pb2", "__doc__": """Model deployment metadata specific to Image Object Detection. + Attributes: node_count: Input only. The number of nodes to deploy the model on. A node diff --git a/google/cloud/automl_v1/proto/io_pb2.py b/google/cloud/automl_v1/proto/io_pb2.py index 836ca5bf..90f14fea 100644 --- a/google/cloud/automl_v1/proto/io_pb2.py +++ b/google/cloud/automl_v1/proto/io_pb2.py @@ -994,6 +994,7 @@ is imported. Regardless of overall success or failure the per-row failures, up to a certain count cap, is listed in Operation.metadata.partial_failures. + Attributes: source: The source of the input. @@ -1166,6 +1167,7 @@ prediction does not happen. Regardless of overall success or failure the per-row failures, up to a certain count cap, will be listed in Operation.metadata.partial_failures. + Attributes: source: The source of the input. @@ -1185,6 +1187,7 @@ "DESCRIPTOR": _DOCUMENTINPUTCONFIG, "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """Input configuration of a [Document][google.cloud.automl.v1.Document]. + Attributes: gcs_source: The Google Cloud Storage location of the document file. Only a @@ -1222,6 +1225,7 @@ YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In that dataset a new table called ``primary_table`` will be created, and filled with precisely the same data as this obtained on import. + Attributes: destination: The destination of the output. @@ -1458,6 +1462,7 @@ ```google.rpc.Status`` `__ represented as a STRUCT, and containing only ``code`` and ``message``. 
+ Attributes: destination: The destination of the output. @@ -1486,6 +1491,7 @@ "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """Output configuration for ModelExport Action. + Attributes: destination: The destination of the output. @@ -1544,6 +1550,7 @@ "DESCRIPTOR": _GCSSOURCE, "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """The Google Cloud Storage location for the input content. + Attributes: input_uris: Required. Google Cloud Storage URIs to input files, up to 2000 @@ -1563,6 +1570,7 @@ "__module__": "google.cloud.automl_v1.proto.io_pb2", "__doc__": """The Google Cloud Storage location where the output is to be written to. + Attributes: output_uri_prefix: Required. Google Cloud Storage URI to output directory, up to diff --git a/google/cloud/automl_v1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1/proto/model_evaluation_pb2.py index f28d3b1d..4975d076 100644 --- a/google/cloud/automl_v1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1/proto/model_evaluation_pb2.py @@ -340,6 +340,7 @@ "DESCRIPTOR": _MODELEVALUATION, "__module__": "google.cloud.automl_v1.proto.model_evaluation_pb2", "__doc__": """Evaluation results of a model. + Attributes: metrics: Output only. Problem type specific evaluation metrics. diff --git a/google/cloud/automl_v1/proto/model_pb2.py b/google/cloud/automl_v1/proto/model_pb2.py index 38ebb945..5127c6e6 100644 --- a/google/cloud/automl_v1/proto/model_pb2.py +++ b/google/cloud/automl_v1/proto/model_pb2.py @@ -534,6 +534,7 @@ "DESCRIPTOR": _MODEL, "__module__": "google.cloud.automl_v1.proto.model_pb2", "__doc__": """API proto representing a trained machine learning model. + Attributes: model_metadata: Required. 
The model metadata that is specific to the problem diff --git a/google/cloud/automl_v1/proto/operations_pb2.py b/google/cloud/automl_v1/proto/operations_pb2.py index 1c86ae1c..4931ccda 100644 --- a/google/cloud/automl_v1/proto/operations_pb2.py +++ b/google/cloud/automl_v1/proto/operations_pb2.py @@ -896,6 +896,7 @@ "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Metadata used across all long running operations returned by AutoML API. + Attributes: details: Ouptut only. Details of specific operation. Even if this field @@ -1022,6 +1023,7 @@ "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Further describes this export data’s output. Supplements [OutputConfig][google.cloud.automl.v1.OutputConfig]. + Attributes: output_location: The output location to which the exported data is written. @@ -1035,6 +1037,7 @@ "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Details of ExportData operation. + Attributes: output_info: Output only. Information further describing this export data’s @@ -1058,6 +1061,7 @@ "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Further describes this batch predict’s output. Supplements [BatchPred ictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig]. + Attributes: output_location: The output location into which prediction output is written. @@ -1071,6 +1075,7 @@ "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Details of BatchPredict operation. + Attributes: input_config: Output only. The input config that was given upon starting @@ -1097,6 +1102,7 @@ "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Further describes the output of model export. Supplements [ModelExport OutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. 
+ Attributes: gcs_output_directory: The full path of the Google Cloud Storage directory created, @@ -1108,6 +1114,7 @@ "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1.proto.operations_pb2", "__doc__": """Details of ExportModel operation. + Attributes: output_info: Output only. Information further describing the output of this diff --git a/google/cloud/automl_v1/proto/prediction_service_pb2.py b/google/cloud/automl_v1/proto/prediction_service_pb2.py index 62cecceb..0f6cfe79 100644 --- a/google/cloud/automl_v1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1/proto/prediction_service_pb2.py @@ -648,6 +648,7 @@ "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", "__doc__": """Request message for [PredictionService.Predict][google.cloud.automl.v1 .PredictionService.Predict]. + Attributes: name: Required. Name of the model requested to serve the prediction. @@ -697,6 +698,7 @@ "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", "__doc__": """Response message for [PredictionService.Predict][google.cloud.automl.v 1.PredictionService.Predict]. + Attributes: payload: Prediction result. AutoML Translation and AutoML Natural @@ -746,6 +748,7 @@ "__module__": "google.cloud.automl_v1.proto.prediction_service_pb2", "__doc__": """Request message for [PredictionService.BatchPredict][google.cloud.auto ml.v1.PredictionService.BatchPredict]. + Attributes: name: Required. Name of the model requested to serve the batch @@ -837,6 +840,7 @@ [response][google.longrunning.Operation.response] of the operation returned by the [PredictionService.BatchPredict][google.cloud.automl.v 1.PredictionService.BatchPredict]. + Attributes: metadata: Additional domain-specific prediction response metadata. 
diff --git a/google/cloud/automl_v1/proto/service_pb2.py b/google/cloud/automl_v1/proto/service_pb2.py index c869c254..7b6bfcf2 100644 --- a/google/cloud/automl_v1/proto/service_pb2.py +++ b/google/cloud/automl_v1/proto/service_pb2.py @@ -1441,6 +1441,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + Attributes: parent: Required. The resource name of the project to create the @@ -1461,6 +1462,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + Attributes: name: Required. The resource name of the dataset to retrieve. @@ -1478,6 +1480,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + Attributes: parent: Required. The resource name of the project from which to list @@ -1512,6 +1515,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + Attributes: datasets: The datasets read. @@ -1533,6 +1537,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + Attributes: dataset: Required. The dataset which replaces the resource on the @@ -1553,6 +1558,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. + Attributes: name: Required. The resource name of the dataset to delete. @@ -1570,6 +1576,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + Attributes: name: Required. Dataset name. 
Dataset must already exist. All @@ -1591,6 +1598,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + Attributes: name: Required. The resource name of the dataset. @@ -1610,6 +1618,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1. AutoMl.GetAnnotationSpec]. + Attributes: name: Required. The resource name of the annotation spec to @@ -1628,6 +1637,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + Attributes: parent: Required. Resource name of the parent project where the model @@ -1648,6 +1658,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + Attributes: name: Required. Resource name of the model. @@ -1665,6 +1676,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + Attributes: parent: Required. Resource name of the project, from which to list the @@ -1700,6 +1712,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + Attributes: model: List of models in the requested page. @@ -1721,6 +1734,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. + Attributes: name: Required. Resource name of the model being deleted. @@ -1738,6 +1752,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] + Attributes: model: Required. 
The model which replaces the resource on the server. @@ -1757,6 +1772,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. + Attributes: model_deployment_metadata: The per-domain specific deployment parameters. @@ -1780,6 +1796,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. + Attributes: name: Required. Resource name of the model to undeploy. @@ -1799,6 +1816,7 @@ [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an error code will be returned. + Attributes: name: Required. The resource name of the model to export. @@ -1818,6 +1836,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 .AutoMl.GetModelEvaluation]. + Attributes: name: Required. Resource name for the model evaluation. @@ -1835,6 +1854,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. v1.AutoMl.ListModelEvaluations]. + Attributes: parent: Required. Resource name of the model to list the model @@ -1872,6 +1892,7 @@ "__module__": "google.cloud.automl_v1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl .v1.AutoMl.ListModelEvaluations]. + Attributes: model_evaluation: List of model evaluations in the requested page. 
diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py index 79caedcb..951204b3 100644 --- a/google/cloud/automl_v1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1/proto/text_extraction_pb2.py @@ -287,6 +287,7 @@ "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.text_extraction_pb2", "__doc__": """Annotation for identifying spans of text. + Attributes: annotation: Required. Text extraction annotations can either be a text @@ -315,6 +316,7 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1.proto.text_extraction_pb2", "__doc__": """Metrics for a single confidence threshold. + Attributes: confidence_threshold: Output only. The confidence threshold value used to compute @@ -333,6 +335,7 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.text_extraction_pb2", "__doc__": """Model evaluation metrics for text extraction problems. + Attributes: au_prc: Output only. The Area under precision recall curve metric. diff --git a/google/cloud/automl_v1/proto/text_pb2.py b/google/cloud/automl_v1/proto/text_pb2.py index 4fb6231e..38c6d0f0 100644 --- a/google/cloud/automl_v1/proto/text_pb2.py +++ b/google/cloud/automl_v1/proto/text_pb2.py @@ -254,6 +254,7 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", "__doc__": """Dataset metadata for classification. + Attributes: classification_type: Required. Type of the classification problem. @@ -270,6 +271,7 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", "__doc__": """Model metadata that is specific to text classification. + Attributes: classification_type: Output only. 
Classification type of the dataset used to train @@ -311,6 +313,7 @@ "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.text_pb2", "__doc__": """Dataset metadata for text sentiment. + Attributes: sentiment_max: Required. A sentiment is expressed as an integer ordinal, diff --git a/google/cloud/automl_v1/proto/text_segment_pb2.py b/google/cloud/automl_v1/proto/text_segment_pb2.py index ed3a2d10..8abf20a3 100644 --- a/google/cloud/automl_v1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1/proto/text_segment_pb2.py @@ -115,6 +115,7 @@ "__module__": "google.cloud.automl_v1.proto.text_segment_pb2", "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding. + Attributes: content: Output only. The content of the TextSegment. diff --git a/google/cloud/automl_v1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1/proto/text_sentiment_pb2.py index 2b55f43e..652603ee 100644 --- a/google/cloud/automl_v1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1/proto/text_sentiment_pb2.py @@ -264,6 +264,7 @@ "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, "__module__": "google.cloud.automl_v1.proto.text_sentiment_pb2", "__doc__": """Contains annotation details specific to text sentiment. + Attributes: sentiment: Output only. The sentiment with the semantic, as given to the @@ -293,6 +294,7 @@ "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.text_sentiment_pb2", "__doc__": """Model evaluation metrics for text sentiment problems. + Attributes: precision: Output only. Precision. 
diff --git a/google/cloud/automl_v1/proto/translation_pb2.py b/google/cloud/automl_v1/proto/translation_pb2.py index 4e0a19fc..ebe7e941 100644 --- a/google/cloud/automl_v1/proto/translation_pb2.py +++ b/google/cloud/automl_v1/proto/translation_pb2.py @@ -295,6 +295,7 @@ "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Dataset metadata that is specific to translation. + Attributes: source_language_code: Required. The BCP-47 language code of the source language. @@ -313,6 +314,7 @@ "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Evaluation metrics for the dataset. + Attributes: bleu_score: Output only. BLEU score. @@ -331,6 +333,7 @@ "DESCRIPTOR": _TRANSLATIONMODELMETADATA, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Model metadata that is specific to translation. + Attributes: base_model: The resource name of the model to use as a baseline to train @@ -356,6 +359,7 @@ "DESCRIPTOR": _TRANSLATIONANNOTATION, "__module__": "google.cloud.automl_v1.proto.translation_pb2", "__doc__": """Annotation details specific to translation. + Attributes: translated_content: Output only . The translated content. diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py index e299a03f..bf06fb77 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py @@ -372,6 +372,7 @@ "DESCRIPTOR": _ANNOTATIONPAYLOAD, "__module__": "google.cloud.automl_v1beta1.proto.annotation_payload_pb2", "__doc__": """Contains annotation information that is relevant to AutoML. + Attributes: detail: Output only . 
Additional information about the annotation diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py index 1c561cf9..c259a290 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py @@ -118,6 +118,7 @@ "DESCRIPTOR": _ANNOTATIONSPEC, "__module__": "google.cloud.automl_v1beta1.proto.annotation_spec_pb2", "__doc__": """A definition of an annotation spec. + Attributes: name: Output only. Resource name of the annotation spec. Form: ‘pro diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2.py b/google/cloud/automl_v1beta1/proto/classification_pb2.py index 25e1ff7b..9b38e2db 100644 --- a/google/cloud/automl_v1beta1/proto/classification_pb2.py +++ b/google/cloud/automl_v1beta1/proto/classification_pb2.py @@ -802,6 +802,7 @@ "DESCRIPTOR": _CLASSIFICATIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Contains annotation details specific to classification. + Attributes: score: Output only. A confidence estimate between 0.0 and 1.0. A @@ -822,6 +823,7 @@ "DESCRIPTOR": _VIDEOCLASSIFICATIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Contains annotation details specific to video classification. + Attributes: type: Output only. Expresses the type of video classification. @@ -868,6 +870,7 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Metrics for a single confidence threshold. + Attributes: confidence_threshold: Output only. Metrics are computed with an assumption that the @@ -934,6 +937,7 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Output only. A row in the confusion matrix. 
+ Attributes: example_count: Output only. Value of the specific cell in the confusion @@ -949,6 +953,7 @@ "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", "__doc__": """Confusion matrix of the model running the classification. + Attributes: annotation_spec_id: Output only. IDs of the annotation specs used in the confusion @@ -978,6 +983,7 @@ "__doc__": """Model evaluation metrics for classification problems. Note: For Video Classification this metrics only describe quality of the Video Classification predictions of “segment_classification” type. + Attributes: au_prc: Output only. The Area Under Precision-Recall Curve metric. diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py index deda85df..b32f9826 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py @@ -266,6 +266,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", "__doc__": """Identifies the table’s column, and its correlation with the column this ColumnSpec describes. + Attributes: column_spec_id: The column_spec_id of the correlated column, which belongs to @@ -281,6 +282,7 @@ "__doc__": """A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were given on import . Used by: \* Tables + Attributes: name: Output only. The resource name of the column specs. Form: ``p diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py index dd5df579..303eb85c 100644 --- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_items_pb2.py @@ -898,6 +898,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A representation of an image. 
Only images up to 30MB in size are supported. + Attributes: data: Input only. The data representing the image. For Predict calls @@ -928,6 +929,7 @@ "DESCRIPTOR": _TEXTSNIPPET, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A representation of a text snippet. + Attributes: content: Required. The content of the text snippet as a string. Up to @@ -954,6 +956,7 @@ "DESCRIPTOR": _DOCUMENTDIMENSIONS, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """Message that describes dimension of a document. + Attributes: unit: Unit of the dimension. @@ -979,6 +982,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """Describes the layout information of a [text_segment][google.cloud.auto ml.v1beta1.Document.Layout.text_segment] in the document. + Attributes: text_segment: Text Segment that represents a segment in [document_text][goog @@ -1006,6 +1010,7 @@ "DESCRIPTOR": _DOCUMENT, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A structured text document e.g. a PDF. + Attributes: input_config: An input config specifying the content of the document. @@ -1032,6 +1037,7 @@ "DESCRIPTOR": _ROW, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """A representation of a row in a relational table. + Attributes: column_spec_ids: The resource IDs of the column specs describing the columns of @@ -1060,6 +1066,7 @@ "DESCRIPTOR": _EXAMPLEPAYLOAD, "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", "__doc__": """Example data used for training or prediction. + Attributes: payload: Required. Input only. The example data. 
diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py index 3447a04b..dc31756b 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py @@ -1074,6 +1074,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of values that share the same DataType. + Attributes: stats: The data statistics specific to a DataType. @@ -1112,6 +1113,7 @@ "DESCRIPTOR": _FLOAT64STATS_HISTOGRAMBUCKET, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """A bucket of a histogram. + Attributes: min: The minimum value of the bucket, inclusive. @@ -1128,6 +1130,7 @@ "DESCRIPTOR": _FLOAT64STATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of FLOAT64 values. + Attributes: mean: The mean of the series. @@ -1163,6 +1166,7 @@ "DESCRIPTOR": _STRINGSTATS_UNIGRAMSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The statistics of a unigram. + Attributes: value: The unigram. @@ -1175,6 +1179,7 @@ "DESCRIPTOR": _STRINGSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of STRING values. + Attributes: top_unigram_stats: The statistics of the top 20 unigrams, ordered by [count][goog @@ -1206,6 +1211,7 @@ "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """Stats split by a defined in context granularity. + Attributes: buckets: A map from granularity key to example count for that key. E.g. @@ -1227,6 +1233,7 @@ "DESCRIPTOR": _TIMESTAMPSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of TIMESTAMP values. + Attributes: granular_stats: The string key is the pre-defined granularity. 
Currently @@ -1250,6 +1257,7 @@ "DESCRIPTOR": _ARRAYSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of ARRAY values. + Attributes: member_stats: Stats of all the values of all arrays, as if they were a @@ -1277,6 +1285,7 @@ "DESCRIPTOR": _STRUCTSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of STRUCT values. + Attributes: field_stats: Map from a field name of the struct to data stats aggregated @@ -1299,6 +1308,7 @@ "DESCRIPTOR": _CATEGORYSTATS_SINGLECATEGORYSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The statistics of a single CATEGORY value. + Attributes: value: The CATEGORY value. @@ -1311,6 +1321,7 @@ "DESCRIPTOR": _CATEGORYSTATS, "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", "__doc__": """The data statistics of a series of CATEGORY values. + Attributes: top_category_stats: The statistics of the top 20 CATEGORY values, ordered by [cou @@ -1332,6 +1343,7 @@ "__doc__": """A correlation statistics between two series of DataType values. The series may have differing DataType-s, but within a single series the DataType must be the same. + Attributes: cramers_v: The correlation value using the Cramer’s V measure. diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2.py b/google/cloud/automl_v1beta1/proto/data_types_pb2.py index d51d15fe..cb1993a8 100644 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_types_pb2.py @@ -369,6 +369,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", "__doc__": """Indicated the type of data that can be stored in a structured data entity (e.g. a table). + Attributes: details: Details of DataType-s that need additional specification. 
@@ -421,6 +422,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", "__doc__": """\ ``StructType`` defines the DataType-s of a [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. + Attributes: fields: Unordered map of struct field names to their data types. diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2.py b/google/cloud/automl_v1beta1/proto/dataset_pb2.py index fee5459e..28aa5238 100644 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1beta1/proto/dataset_pb2.py @@ -480,6 +480,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.dataset_pb2", "__doc__": """A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples that may be annotated. + Attributes: dataset_metadata: Required. The dataset metadata that is specific to the problem diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2.py b/google/cloud/automl_v1beta1/proto/detection_pb2.py index 21f66e5f..940fac4d 100644 --- a/google/cloud/automl_v1beta1/proto/detection_pb2.py +++ b/google/cloud/automl_v1beta1/proto/detection_pb2.py @@ -591,6 +591,7 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Annotation details for image object detection. + Attributes: bounding_box: Output only. The rectangle representing the object location. @@ -611,6 +612,7 @@ "DESCRIPTOR": _VIDEOOBJECTTRACKINGANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Annotation details for video object tracking. + Attributes: instance_id: Optional. The instance of the object, expressed as a positive @@ -650,6 +652,7 @@ "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Metrics for a single confidence threshold. + Attributes: confidence_threshold: Output only. 
The confidence threshold value used to compute @@ -668,6 +671,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Bounding box matching model metrics for a single intersection-over- union threshold and multiple label match confidence thresholds. + Attributes: iou_threshold: Output only. The intersection-over-union threshold value used @@ -694,6 +698,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", "__doc__": """Model evaluation metrics for image object detection problems. Evaluates prediction quality of labeled bounding boxes. + Attributes: evaluated_bounding_box_count: Output only. The total number of bounding boxes (i.e. summed @@ -723,6 +728,7 @@ "__doc__": """Model evaluation metrics for video object tracking problems. Evaluates prediction quality of both labeled bounding boxes and labeled tracks (i.e. series of bounding boxes sharing same label and instance ID). + Attributes: evaluated_frame_count: Output only. The number of video frames used to create this diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2.py b/google/cloud/automl_v1beta1/proto/geometry_pb2.py index 6d06a5fa..2d355059 100644 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1beta1/proto/geometry_pb2.py @@ -138,6 +138,7 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", "__doc__": """Required. Horizontal coordinate. + Attributes: y: Required. Vertical coordinate. @@ -156,6 +157,7 @@ "__doc__": """A bounding polygon of a detected object on a plane. On output both vertices and normalized_vertices are provided. The polygon is formed by connecting vertices in the order they are listed. + Attributes: normalized_vertices: Output only . The bounding polygon normalized vertices. 
diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py index fe782edd..73765cfa 100644 --- a/google/cloud/automl_v1beta1/proto/image_pb2.py +++ b/google/cloud/automl_v1beta1/proto/image_pb2.py @@ -506,6 +506,7 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Dataset metadata that is specific to image classification. + Attributes: classification_type: Required. Type of the classification problem. @@ -534,6 +535,7 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Model metadata for image classification. + Attributes: base_model_id: Optional. The ID of the ``base`` model. If it is specified, @@ -610,6 +612,7 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Model metadata specific to image object detection. + Attributes: model_type: Optional. Type of the model. The available values are: \* @@ -682,6 +685,7 @@ "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Model deployment metadata specific to Image Classification. + Attributes: node_count: Input only. The number of nodes to deploy the model on. A node @@ -702,6 +706,7 @@ "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", "__doc__": """Model deployment metadata specific to Image Object Detection. + Attributes: node_count: Input only. The number of nodes to deploy the model on. A node diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py index 8cfdba9f..36ca7cd0 100644 --- a/google/cloud/automl_v1beta1/proto/io_pb2.py +++ b/google/cloud/automl_v1beta1/proto/io_pb2.py @@ -1203,6 +1203,7 @@ operation fails and nothing is imported. 
Regardless of overall success or failure the per-row failures, up to a certain count cap, is listed in Operation.metadata.partial_failures. + Attributes: source: The source of the input. @@ -1358,6 +1359,7 @@ the operation fails and prediction does not happen. Regardless of overall success or failure the per-row failures, up to a certain count cap, will be listed in Operation.metadata.partial_failures. + Attributes: source: Required. The source of the input. @@ -1379,6 +1381,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """Input configuration of a [Document][google.cloud.automl.v1beta1.Document]. + Attributes: gcs_source: The Google Cloud Storage location of the document file. Only a @@ -1416,6 +1419,7 @@ on ISO-8601” format. In that dataset a new table called ``primary_table`` will be created, and filled with precisely the same data as this obtained on import. + Attributes: destination: Required. The destination of the output. @@ -1652,6 +1656,7 @@ and as a value has ```google.rpc.Status`` `__ represented as a STRUCT, and containing only ``code`` and ``message``. + Attributes: destination: Required. The destination of the output. @@ -1682,6 +1687,7 @@ "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """Output configuration for ModelExport Action. + Attributes: destination: Required. The destination of the output. @@ -1763,6 +1769,7 @@ [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. + Attributes: destination: Required. The destination of the output. @@ -1781,6 +1788,7 @@ "DESCRIPTOR": _GCSSOURCE, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The Google Cloud Storage location for the input content. + Attributes: input_uris: Required. 
Google Cloud Storage URIs to input files, up to 2000 @@ -1799,6 +1807,7 @@ "DESCRIPTOR": _BIGQUERYSOURCE, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The BigQuery location for the input content. + Attributes: input_uri: Required. BigQuery URI to a table, up to 2000 characters long. @@ -1818,6 +1827,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The Google Cloud Storage location where the output is to be written to. + Attributes: output_uri_prefix: Required. Google Cloud Storage URI to output directory, up to @@ -1838,6 +1848,7 @@ "DESCRIPTOR": _BIGQUERYDESTINATION, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The BigQuery location for the output content. + Attributes: output_uri: Required. BigQuery URI to a project, up to 2000 characters @@ -1855,6 +1866,7 @@ "DESCRIPTOR": _GCRDESTINATION, "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", "__doc__": """The GCR location where the image must be pushed to. + Attributes: output_uri: Required. Google Contained Registry URI of the new image, up diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py index f987190b..4dee2ad7 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py @@ -408,6 +408,7 @@ "DESCRIPTOR": _MODELEVALUATION, "__module__": "google.cloud.automl_v1beta1.proto.model_evaluation_pb2", "__doc__": """Evaluation results of a model. + Attributes: metrics: Output only. Problem type specific evaluation metrics. 
diff --git a/google/cloud/automl_v1beta1/proto/model_pb2.py b/google/cloud/automl_v1beta1/proto/model_pb2.py index 2f0e369a..cf935cca 100644 --- a/google/cloud/automl_v1beta1/proto/model_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_pb2.py @@ -524,6 +524,7 @@ "DESCRIPTOR": _MODEL, "__module__": "google.cloud.automl_v1beta1.proto.model_pb2", "__doc__": """API proto representing a trained machine learning model. + Attributes: model_metadata: Required. The model metadata that is specific to the problem diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py index ee32f2aa..a6811b08 100644 --- a/google/cloud/automl_v1beta1/proto/operations_pb2.py +++ b/google/cloud/automl_v1beta1/proto/operations_pb2.py @@ -1034,6 +1034,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Metadata used across all long running operations returned by AutoML API. + Attributes: details: Ouptut only. Details of specific operation. Even if this field @@ -1148,6 +1149,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Further describes this export data’s output. Supplements [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. + Attributes: output_location: The output location to which the exported data is written. @@ -1165,6 +1167,7 @@ "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of ExportData operation. + Attributes: output_info: Output only. Information further describing this export data’s @@ -1189,6 +1192,7 @@ "__doc__": """Further describes this batch predict’s output. Supplements [BatchPred ictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig] . + Attributes: output_location: The output location into which prediction output is written. 
@@ -1206,6 +1210,7 @@ "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of BatchPredict operation. + Attributes: input_config: Output only. The input config that was given upon starting @@ -1232,6 +1237,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Further describes the output of model export. Supplements [ModelExpor tOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. + Attributes: gcs_output_directory: The full path of the Google Cloud Storage directory created, @@ -1243,6 +1249,7 @@ "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of ExportModel operation. + Attributes: output_info: Output only. Information further describing the output of this @@ -1267,6 +1274,7 @@ "__doc__": """Further describes the output of the evaluated examples export. Supplements [ExportEvaluatedExamplesOutputConfig][google.cloud.automl .v1beta1.ExportEvaluatedExamplesOutputConfig]. + Attributes: bigquery_output_dataset: The path of the BigQuery dataset created, in @@ -1279,6 +1287,7 @@ "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", "__doc__": """Details of EvaluatedExamples operation. + Attributes: output_info: Output only. Information further describing the output of this diff --git a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py index 248338d5..b22759e8 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py @@ -648,6 +648,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", "__doc__": """Request message for [PredictionService.Predict][google.cloud.automl.v1 beta1.PredictionService.Predict]. 
+ Attributes: name: Required. Name of the model requested to serve the prediction. @@ -694,6 +695,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", "__doc__": """Response message for [PredictionService.Predict][google.cloud.automl.v 1beta1.PredictionService.Predict]. + Attributes: payload: Prediction result. Translation and Text Sentiment will return @@ -742,6 +744,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.prediction_service_pb2", "__doc__": """Request message for [PredictionService.BatchPredict][google.cloud.auto ml.v1beta1.PredictionService.BatchPredict]. + Attributes: name: Required. Name of the model requested to serve the batch @@ -835,6 +838,7 @@ [response][google.longrunning.Operation.response] of the operation returned by the [PredictionService.BatchPredict][google.cloud.automl.v 1beta1.PredictionService.BatchPredict]. + Attributes: metadata: Additional domain-specific prediction response metadata. - diff --git a/google/cloud/automl_v1beta1/proto/ranges_pb2.py b/google/cloud/automl_v1beta1/proto/ranges_pb2.py index bf9c18e1..3c0a34a7 100644 --- a/google/cloud/automl_v1beta1/proto/ranges_pb2.py +++ b/google/cloud/automl_v1beta1/proto/ranges_pb2.py @@ -95,6 +95,7 @@ "DESCRIPTOR": _DOUBLERANGE, "__module__": "google.cloud.automl_v1beta1.proto.ranges_pb2", "__doc__": """A range between two double numbers. + Attributes: start: Start of the range, inclusive. diff --git a/google/cloud/automl_v1beta1/proto/regression_pb2.py b/google/cloud/automl_v1beta1/proto/regression_pb2.py index 1da010b9..807e3147 100644 --- a/google/cloud/automl_v1beta1/proto/regression_pb2.py +++ b/google/cloud/automl_v1beta1/proto/regression_pb2.py @@ -154,6 +154,7 @@ "DESCRIPTOR": _REGRESSIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.regression_pb2", "__doc__": """Metrics for regression problems. + Attributes: root_mean_squared_error: Output only. Root Mean Squared Error (RMSE). 
diff --git a/google/cloud/automl_v1beta1/proto/service_pb2.py b/google/cloud/automl_v1beta1/proto/service_pb2.py index 05beff01..50e77bf1 100644 --- a/google/cloud/automl_v1beta1/proto/service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/service_pb2.py @@ -2098,6 +2098,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1 .AutoMl.CreateDataset]. + Attributes: parent: Required. The resource name of the project to create the @@ -2118,6 +2119,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + Attributes: name: Required. The resource name of the dataset to retrieve. @@ -2135,6 +2137,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1. AutoMl.ListDatasets]. + Attributes: parent: Required. The resource name of the project from which to list @@ -2169,6 +2172,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1 .AutoMl.ListDatasets]. + Attributes: datasets: The datasets read. @@ -2190,6 +2194,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1 .AutoMl.UpdateDataset] + Attributes: dataset: Required. The dataset which replaces the resource on the @@ -2210,6 +2215,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1 .AutoMl.DeleteDataset]. + Attributes: name: Required. The resource name of the dataset to delete. 
@@ -2227,6 +2233,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + Attributes: name: Required. Dataset name. Dataset must already exist. All @@ -2248,6 +2255,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + Attributes: name: Required. The resource name of the dataset. @@ -2267,6 +2275,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1b eta1.AutoMl.GetAnnotationSpec]. + Attributes: name: Required. The resource name of the annotation spec to @@ -2285,6 +2294,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1. AutoMl.GetTableSpec]. + Attributes: name: Required. The resource name of the table spec to retrieve. @@ -2304,6 +2314,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta 1.AutoMl.ListTableSpecs]. + Attributes: parent: Required. The resource name of the dataset to list table specs @@ -2336,6 +2347,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1bet a1.AutoMl.ListTableSpecs]. + Attributes: table_specs: The table specs read. @@ -2357,6 +2369,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1bet a1.AutoMl.UpdateTableSpec] + Attributes: table_spec: Required. 
The table spec which replaces the resource on the @@ -2377,6 +2390,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1 .AutoMl.GetColumnSpec]. + Attributes: name: Required. The resource name of the column spec to retrieve. @@ -2396,6 +2410,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1bet a1.AutoMl.ListColumnSpecs]. + Attributes: parent: Required. The resource name of the table spec to list column @@ -2429,6 +2444,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1be ta1.AutoMl.ListColumnSpecs]. + Attributes: column_specs: The column specs read. @@ -2450,6 +2466,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1be ta1.AutoMl.UpdateColumnSpec] + Attributes: column_spec: Required. The column spec which replaces the resource on the @@ -2470,6 +2487,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + Attributes: parent: Required. Resource name of the parent project where the model @@ -2490,6 +2508,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + Attributes: name: Required. Resource name of the model. @@ -2507,6 +2526,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + Attributes: parent: Required. 
Resource name of the project, from which to list the @@ -2541,6 +2561,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + Attributes: model: List of models in the requested page. @@ -2562,6 +2583,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + Attributes: name: Required. Resource name of the model being deleted. @@ -2579,6 +2601,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. + Attributes: model_deployment_metadata: The per-domain specific deployment parameters. @@ -2602,6 +2625,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1 .AutoMl.UndeployModel]. + Attributes: name: Required. Resource name of the model to undeploy. @@ -2621,6 +2645,7 @@ [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an error code will be returned. + Attributes: name: Required. The resource name of the model to export. @@ -2640,6 +2665,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.auto ml.v1beta1.AutoMl.ExportEvaluatedExamples]. + Attributes: name: Required. The resource name of the model whose evaluated @@ -2660,6 +2686,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 beta1.AutoMl.GetModelEvaluation]. + Attributes: name: Required. Resource name for the model evaluation. 
@@ -2677,6 +2704,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. v1beta1.AutoMl.ListModelEvaluations]. + Attributes: parent: Required. Resource name of the model to list the model @@ -2714,6 +2742,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl .v1beta1.AutoMl.ListModelEvaluations]. + Attributes: model_evaluation: List of model evaluations in the requested page. diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py index 66d30724..a6934cea 100644 --- a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py @@ -207,6 +207,7 @@ a table, at times the schema may be inconsistent with the data in the table (e.g. string in a FLOAT64 column). The consistency validation is done upon creation of a model. Used by: \* Tables + Attributes: name: Output only. The resource name of the table spec. Form: ``pro diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py index 364ed1b8..96fb1098 100644 --- a/google/cloud/automl_v1beta1/proto/tables_pb2.py +++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py @@ -732,6 +732,7 @@ "DESCRIPTOR": _TABLESDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """Metadata for a dataset used for AutoML Tables. + Attributes: primary_table_spec_id: Output only. The table_spec_id of the primary table of this @@ -800,6 +801,7 @@ "DESCRIPTOR": _TABLESMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """Model metadata specific to AutoML Tables. + Attributes: additional_optimization_objective_config: Additional optimization objective configuration. 
Required for @@ -892,6 +894,7 @@ "DESCRIPTOR": _TABLESANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """Contains annotation details specific to Tables. + Attributes: score: Output only. A confidence estimate between 0.0 and 1.0, @@ -944,6 +947,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", "__doc__": """An information specific to given column and Tables Model, in context of the Model and the predictions created by it. + Attributes: column_spec_name: Output only. The name of the ColumnSpec describing the column. diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2.py b/google/cloud/automl_v1beta1/proto/temporal_pb2.py index 20b9f802..309c4644 100644 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2.py +++ b/google/cloud/automl_v1beta1/proto/temporal_pb2.py @@ -106,6 +106,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.temporal_pb2", "__doc__": """A time period inside of an example that has a time dimension (e.g. video). + Attributes: start_time_offset: Start of the time segment (inclusive), represented as the diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py index e3efded5..ab21cf1e 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py @@ -287,6 +287,7 @@ "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", "__doc__": """Annotation for identifying spans of text. + Attributes: annotation: Required. Text extraction annotations can either be a text @@ -315,6 +316,7 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", "__doc__": """Metrics for a single confidence threshold. + Attributes: confidence_threshold: Output only. 
The confidence threshold value used to compute @@ -333,6 +335,7 @@ "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", "__doc__": """Model evaluation metrics for text extraction problems. + Attributes: au_prc: Output only. The Area under precision recall curve metric. diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py index 3b36d894..6d9b725d 100644 --- a/google/cloud/automl_v1beta1/proto/text_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_pb2.py @@ -254,6 +254,7 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", "__doc__": """Dataset metadata for classification. + Attributes: classification_type: Required. Type of the classification problem. @@ -270,6 +271,7 @@ "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", "__doc__": """Model metadata that is specific to text classification. + Attributes: classification_type: Output only. Classification type of the dataset used to train @@ -311,6 +313,7 @@ "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", "__doc__": """Dataset metadata for text sentiment. + Attributes: sentiment_max: Required. A sentiment is expressed as an integer ordinal, diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py index 4327112e..ed5ae997 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py @@ -115,6 +115,7 @@ "__module__": "google.cloud.automl_v1beta1.proto.text_segment_pb2", "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding. + Attributes: content: Output only. The content of the TextSegment. 
diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py index c1e80777..1332660d 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py @@ -283,6 +283,7 @@ "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", "__doc__": """Contains annotation details specific to text sentiment. + Attributes: sentiment: Output only. The sentiment with the semantic, as given to the @@ -313,6 +314,7 @@ "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", "__doc__": """Model evaluation metrics for text sentiment problems. + Attributes: precision: Output only. Precision. diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2.py b/google/cloud/automl_v1beta1/proto/translation_pb2.py index bc17be61..b5df3e32 100644 --- a/google/cloud/automl_v1beta1/proto/translation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/translation_pb2.py @@ -295,6 +295,7 @@ "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Dataset metadata that is specific to translation. + Attributes: source_language_code: Required. The BCP-47 language code of the source language. @@ -313,6 +314,7 @@ "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Evaluation metrics for the dataset. + Attributes: bleu_score: Output only. BLEU score. @@ -331,6 +333,7 @@ "DESCRIPTOR": _TRANSLATIONMODELMETADATA, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Model metadata that is specific to translation. 
+ Attributes: base_model: The resource name of the model to use as a baseline to train @@ -356,6 +359,7 @@ "DESCRIPTOR": _TRANSLATIONANNOTATION, "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", "__doc__": """Annotation details specific to translation. + Attributes: translated_content: Output only . The translated content. diff --git a/synth.metadata b/synth.metadata index 9d23d01f..b7859a0e 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "3a4894c4f0da3e763aca2c67bd280ae915177450", - "internalRef": "314363155" + "sha": "184661793fbe3b89f2b485c303e7466cef9d21a1", + "internalRef": "316182409" } }, { From 0507c8d267a78125d584c969bc73434ec1cd0166 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:54:20 -0700 Subject: [PATCH 6/9] fix: migrate automl/v1 to grpc_service_config PiperOrigin-RevId: 317760936 Source-Author: Google APIs Source-Date: Mon Jun 22 16:41:25 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: dd25bfd61bce9a24c7a7793af4fb7ac61e3f6542 Source-Link: https://github.com/googleapis/googleapis/commit/dd25bfd61bce9a24c7a7793af4fb7ac61e3f6542 --- .../cloud/automl_v1/gapic/auto_ml_client.py | 344 +++++++++--------- .../automl_v1/gapic/auto_ml_client_config.py | 139 ++++--- .../gapic/prediction_service_client_config.py | 38 +- .../transports/auto_ml_grpc_transport.py | 60 +-- synth.metadata | 4 +- tests/unit/gapic/v1/test_auto_ml_client_v1.py | 200 +++++----- 6 files changed, 405 insertions(+), 380 deletions(-) diff --git a/google/cloud/automl_v1/gapic/auto_ml_client.py b/google/cloud/automl_v1/gapic/auto_ml_client.py index 6640df12..8cd6ee34 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client.py @@ -266,178 +266,6 @@ def __init__( self._inner_api_calls = {} # Service calls - def delete_dataset( - self, - name, - 
retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.delete_dataset(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The resource name of the dataset to delete. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_dataset, - default_retry=self._method_configs["DeleteDataset"].retry, - default_timeout=self._method_configs["DeleteDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.DeleteDatasetRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["delete_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - - def delete_model( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. Resource name of the model being deleted. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_model" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.DeleteModelRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["delete_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def create_dataset( self, parent, @@ -794,6 +622,92 @@ def update_dataset( request, retry=retry, timeout=timeout, metadata=metadata ) + def delete_dataset( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + 
timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. + + Example: + >>> from google.cloud import automl_v1 + >>> + >>> client = automl_v1.AutoMlClient() + >>> + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') + >>> + >>> response = client.delete_dataset(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + name (str): Required. The resource name of the dataset to delete. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "delete_dataset" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_dataset" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_dataset, + default_retry=self._method_configs["DeleteDataset"].retry, + default_timeout=self._method_configs["DeleteDataset"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.DeleteDatasetRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["delete_dataset"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) + def import_data( self, name, @@ -1336,6 +1250,92 @@ def list_models( ) return iterator + def delete_model( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. + + Example: + >>> from google.cloud import automl_v1 + >>> + >>> client = automl_v1.AutoMlClient() + >>> + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> + >>> response = client.delete_model(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + name (str): Required. Resource name of the model being deleted. 
+ retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.automl_v1.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_model" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_model" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_model, + default_retry=self._method_configs["DeleteModel"].retry, + default_timeout=self._method_configs["DeleteModel"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.DeleteModelRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["delete_model"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) + def update_model( self, model, diff --git a/google/cloud/automl_v1/gapic/auto_ml_client_config.py 
b/google/cloud/automl_v1/gapic/auto_ml_client_config.py index 10a58643..0c89b881 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client_config.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client_config.py @@ -2,110 +2,129 @@ "interfaces": { "google.cloud.automl.v1.AutoMl": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], + "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "no_retry_codes": [], + "no_retry_1_codes": [], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 5000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 5000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, }, "methods": { - "DeleteDataset": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteModel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, "CreateDataset": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetDataset": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", 
- "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListDatasets": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "UpdateDataset": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", + }, + "DeleteDataset": { + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ImportData": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "ExportData": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetAnnotationSpec": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "CreateModel": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetModel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListModels": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": 
"retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "DeleteModel": { + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "UpdateModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "DeployModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "UndeployModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "ExportModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetModelEvaluation": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListModelEvaluations": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, }, } diff --git a/google/cloud/automl_v1/gapic/prediction_service_client_config.py b/google/cloud/automl_v1/gapic/prediction_service_client_config.py index bcfb22ff..e4b1a44f 100644 --- a/google/cloud/automl_v1/gapic/prediction_service_client_config.py +++ b/google/cloud/automl_v1/gapic/prediction_service_client_config.py @@ -1,31 +1,37 @@ config = { "interfaces": { 
"google.cloud.automl.v1.PredictionService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, + "retry_codes": {"no_retry_2_codes": [], "no_retry_codes": []}, "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_2_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000, + }, }, "methods": { "Predict": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "BatchPredict": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, }, } diff --git a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py index 2f8d0531..6ebffac5 100644 --- a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py +++ b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py @@ -115,36 +115,6 @@ def channel(self): """ return self._channel - @property - def delete_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - - Deletes a dataset and all of its contents. 
Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeleteDataset - - @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. - - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeleteModel - @property def create_dataset(self): """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. @@ -197,6 +167,21 @@ def update_dataset(self): """ return self._stubs["auto_ml_stub"].UpdateDataset + @property + def delete_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. + + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["auto_ml_stub"].DeleteDataset + @property def import_data(self): """Return the gRPC stub for :meth:`AutoMlClient.import_data`. @@ -286,6 +271,21 @@ def list_models(self): """ return self._stubs["auto_ml_stub"].ListModels + @property + def delete_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. 
+ + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["auto_ml_stub"].DeleteModel + @property def update_model(self): """Return the gRPC stub for :meth:`AutoMlClient.update_model`. diff --git a/synth.metadata b/synth.metadata index b7859a0e..a6dc5e61 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "184661793fbe3b89f2b485c303e7466cef9d21a1", - "internalRef": "316182409" + "sha": "dd25bfd61bce9a24c7a7793af4fb7ac61e3f6542", + "internalRef": "317760936" } }, { diff --git a/tests/unit/gapic/v1/test_auto_ml_client_v1.py b/tests/unit/gapic/v1/test_auto_ml_client_v1.py index 0e83fb9a..1b8ae7d5 100644 --- a/tests/unit/gapic/v1/test_auto_ml_client_v1.py +++ b/tests/unit/gapic/v1/test_auto_ml_client_v1.py @@ -70,106 +70,6 @@ class CustomException(Exception): class TestAutoMlClient(object): - def test_delete_dataset(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_dataset", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.delete_dataset(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_dataset_exception(self): - # Setup 
Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_dataset_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.delete_dataset(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_model(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.delete_model(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client 
= automl_v1.AutoMlClient() - - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - response = client.delete_model(name) - exception = response.exception() - assert exception.errors[0] == error - def test_create_dataset(self): # Setup Expected Response name = "name3373707" @@ -378,6 +278,56 @@ def test_update_dataset_exception(self): with pytest.raises(CustomException): client.update_dataset(dataset, update_mask) + def test_delete_dataset(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_delete_dataset", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + + response = client.delete_dataset(name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.DeleteDatasetRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_dataset_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_delete_dataset_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + + response = client.delete_dataset(name) + exception 
= response.exception() + assert exception.errors[0] == error + def test_import_data(self): # Setup Expected Response expected_response = {} @@ -684,6 +634,56 @@ def test_list_models_exception(self): with pytest.raises(CustomException): list(paged_list_response) + def test_delete_model(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_delete_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.delete_model(name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.DeleteModelRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_delete_model_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.delete_model(name) + exception = response.exception() + assert exception.errors[0] == error + def test_update_model(self): # Setup Expected Response name = "name3373707" From 831335244273462d2a8b0c43d8e6da4183fec6fc Mon Sep 17 00:00:00 
2001 From: yoshi-automation Date: Tue, 23 Jun 2020 05:54:47 -0700 Subject: [PATCH 7/9] fix: migrate automl/v1beta1 to grpc_service_config PiperOrigin-RevId: 317760971 Source-Author: Google APIs Source-Date: Mon Jun 22 16:41:36 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 5b85137bf6fb01dcf8a949a6a04eee6ed0c22bec Source-Link: https://github.com/googleapis/googleapis/commit/5b85137bf6fb01dcf8a949a6a04eee6ed0c22bec --- .../automl_v1beta1/gapic/auto_ml_client.py | 1400 ++++++++--------- .../gapic/auto_ml_client_config.py | 199 +-- .../gapic/prediction_service_client_config.py | 34 +- .../transports/auto_ml_grpc_transport.py | 214 +-- synth.metadata | 4 +- .../v1beta1/test_auto_ml_client_v1beta1.py | 722 ++++----- 6 files changed, 1299 insertions(+), 1274 deletions(-) diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client.py index ae6bf179..05169685 100644 --- a/google/cloud/automl_v1beta1/gapic/auto_ml_client.py +++ b/google/cloud/automl_v1beta1/gapic/auto_ml_client.py @@ -291,38 +291,35 @@ def __init__( self._inner_api_calls = {} # Service calls - def delete_dataset( + def create_dataset( self, - name, + parent, + dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. + Creates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> response = client.delete_dataset(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... 
result = operation_future.result() + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `dataset`: + >>> dataset = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.create_dataset(parent, dataset) Args: - name (str): Required. The resource name of the dataset to delete. + parent (str): Required. The resource name of the project to create the dataset for. + dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -333,7 +330,7 @@ def delete_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -343,22 +340,22 @@ def delete_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_dataset" not in self._inner_api_calls: + if "create_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "delete_dataset" + "create_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_dataset, - default_retry=self._method_configs["DeleteDataset"].retry, - default_timeout=self._method_configs["DeleteDataset"].timeout, + self.transport.create_dataset, + default_retry=self._method_configs["CreateDataset"].retry, + default_timeout=self._method_configs["CreateDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteDatasetRequest(name=name,) + request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -367,33 +364,19 @@ def delete_dataset( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_dataset"]( + return self._inner_api_calls["create_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def import_data( + def get_dataset( self, name, - input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. + Gets a dataset. 
Example: >>> from google.cloud import automl_v1beta1 @@ -402,28 +385,10 @@ def import_data( >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> # TODO: Initialize `input_config`: - >>> input_config = {} - >>> - >>> response = client.import_data(name, input_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_dataset(name) Args: - name (str): Required. Dataset name. Dataset must already exist. All imported - annotations and examples will be added. - input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location and its domain specific semantics, - if any. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.InputConfig` + name (str): Required. The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -434,7 +399,7 @@ def import_data( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -444,17 +409,17 @@ def import_data( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "import_data" not in self._inner_api_calls: + if "get_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "import_data" + "get_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.import_data, - default_retry=self._method_configs["ImportData"].retry, - default_timeout=self._method_configs["ImportData"].timeout, + self.transport.get_dataset, + default_retry=self._method_configs["GetDataset"].retry, + default_timeout=self._method_configs["GetDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) + request = service_pb2.GetDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -468,55 +433,58 @@ def import_data( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["import_data"]( + return self._inner_api_calls["get_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def export_data( + def list_datasets( self, - name, - output_config, + parent, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports dataset's data to the provided output location. Returns an - empty response in the ``response`` field when it completes. + Lists datasets in a project. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> response = client.export_data(name, output_config) + >>> # Iterate over all results + >>> for element in client.list_datasets(parent): + ... 
# process element + ... pass >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() >>> - >>> response.add_done_callback(callback) + >>> # Alternatively: >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> # Iterate over results one page at a time + >>> for page in client.list_datasets(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - name (str): Required. The resource name of the dataset. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. + parent (str): Required. The resource name of the project from which to list datasets. + filter_ (str): An expression for filtering the results of the request. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` + - ``dataset_metadata`` - for existence of the case (e.g. + image_classification_dataset_metadata:*). Some examples of using the + filter are: + + - ``translation_dataset_metadata:*`` --> The dataset has + translation_dataset_metadata. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -527,7 +495,10 @@ def export_data( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. 
+ You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -537,22 +508,24 @@ def export_data( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "export_data" not in self._inner_api_calls: + if "list_datasets" not in self._inner_api_calls: self._inner_api_calls[ - "export_data" + "list_datasets" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_data, - default_retry=self._method_configs["ExportData"].retry, - default_timeout=self._method_configs["ExportData"].timeout, + self.transport.list_datasets, + default_retry=self._method_configs["ListDatasets"].retry, + default_timeout=self._method_configs["ListDatasets"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) + request = service_pb2.ListDatasetsRequest( + parent=parent, filter=filter_, page_size=page_size, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -561,48 +534,51 @@ def export_data( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_data"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_datasets"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="datasets", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def delete_model( + def update_dataset( self, - name, + dataset, + 
update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. + Updates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `dataset`: + >>> dataset = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_dataset(dataset) Args: - name (str): Required. Resource name of the model being deleted. + dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.Dataset` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -613,7 +589,7 @@ def delete_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -623,22 +599,24 @@ def delete_model( ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. - if "delete_model" not in self._inner_api_calls: + if "update_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "delete_model" + "update_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, + self.transport.update_dataset, + default_retry=self._method_configs["UpdateDataset"].retry, + default_timeout=self._method_configs["UpdateDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteModelRequest(name=name,) + request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("dataset.name", dataset.name)] except AttributeError: pass else: @@ -647,44 +625,30 @@ def delete_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_model"]( + return self._inner_api_calls["update_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def export_model( + def delete_dataset( self, name, - output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports a trained, "export-able", model to a user specified Google - Cloud Storage location. A model is considered export-able if and only if - it has an export format defined for it in - - ``ModelExportOutputConfig``. - - Returns an empty response in the ``response`` field when it completes. + Deletes a dataset and all of its contents. 
Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> response = client.export_model(name, output_config) + >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result. @@ -696,11 +660,7 @@ def export_model( >>> metadata = response.metadata() Args: - name (str): Required. The resource name of the model to export. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig` + name (str): Required. The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -721,19 +681,17 @@ def export_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "export_model" not in self._inner_api_calls: + if "delete_dataset" not in self._inner_api_calls: self._inner_api_calls[ - "export_model" + "delete_dataset" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_model, - default_retry=self._method_configs["ExportModel"].retry, - default_timeout=self._method_configs["ExportModel"].timeout, + self.transport.delete_dataset, + default_retry=self._method_configs["DeleteDataset"].retry, + default_timeout=self._method_configs["DeleteDataset"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportModelRequest( - name=name, output_config=output_config, - ) + request = service_pb2.DeleteDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -747,7 +705,7 @@ def export_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_model"]( + operation = self._inner_api_calls["delete_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( @@ -757,39 +715,35 @@ def export_model( metadata_type=proto_operations_pb2.OperationMetadata, ) - def export_evaluated_examples( + def import_data( self, name, - output_config, + input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports examples on which the model was evaluated (i.e. which were - in the TEST set of the dataset the model was created from), together - with their ground truth annotations and the annotations created - (predicted) by the model. The examples, ground truth and predictions are - exported in the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. - Currently only available for Tables. 
+ For Tables: - Returns an empty response in the ``response`` field when it completes. + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} + >>> # TODO: Initialize `input_config`: + >>> input_config = {} >>> - >>> response = client.export_evaluated_examples(name, output_config) + >>> response = client.import_data(name, input_config) >>> >>> def callback(operation_future): ... # Handle result. @@ -801,12 +755,13 @@ def export_evaluated_examples( >>> metadata = response.metadata() Args: - name (str): Required. The resource name of the model whose evaluated examples are to - be exported. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. + name (str): Required. Dataset name. Dataset must already exist. All imported + annotations and examples will be added. + input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location and its domain specific semantics, + if any. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` + message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -827,19 +782,17 @@ def export_evaluated_examples( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "export_evaluated_examples" not in self._inner_api_calls: + if "import_data" not in self._inner_api_calls: self._inner_api_calls[ - "export_evaluated_examples" + "import_data" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_evaluated_examples, - default_retry=self._method_configs["ExportEvaluatedExamples"].retry, - default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, + self.transport.import_data, + default_retry=self._method_configs["ImportData"].retry, + default_timeout=self._method_configs["ImportData"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config, - ) + request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -853,7 +806,7 @@ def export_evaluated_examples( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_evaluated_examples"]( + operation = self._inner_api_calls["import_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( @@ -863,59 +816,45 @@ def export_evaluated_examples( metadata_type=proto_operations_pb2.OperationMetadata, ) - def list_model_evaluations( + def export_data( self, - parent, - filter_=None, - page_size=None, + name, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists model evaluations. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> - >>> # Iterate over all results - >>> for element in client.list_model_evaluations(parent): - ... # process element - ... pass + >>> # TODO: Initialize `output_config`: + >>> output_config = {} >>> + >>> response = client.export_data(name, output_config) >>> - >>> # Alternatively: + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_model_evaluations(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): Required. Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations from across all - models of the parent location. - filter_ (str): An expression for filtering the results of the request. - - - ``annotation_spec_id`` - for =, != or existence. See example below - for the last. - - Some examples of using the filter are: + name (str): Required. The resource name of the dataset. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. - - ``annotation_spec_id!=4`` --> The model evaluation was done for - annotation spec with ID different than 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. 
If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -926,10 +865,7 @@ def list_model_evaluations( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -939,24 +875,22 @@ def list_model_evaluations( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_model_evaluations" not in self._inner_api_calls: + if "export_data" not in self._inner_api_calls: self._inner_api_calls[ - "list_model_evaluations" + "export_data" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_model_evaluations, - default_retry=self._method_configs["ListModelEvaluations"].retry, - default_timeout=self._method_configs["ListModelEvaluations"].timeout, + self.transport.export_data, + default_retry=self._method_configs["ExportData"].retry, + default_timeout=self._method_configs["ExportData"].timeout, client_info=self._client_info, ) - request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) + request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -965,50 +899,37 @@ def list_model_evaluations( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_model_evaluations"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="model_evaluation", - request_token_field="page_token", - response_token_field="next_page_token", + operation = self._inner_api_calls["export_data"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, ) - return iterator - def create_dataset( + def get_annotation_spec( self, - parent, - dataset, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a dataset. + Gets an annotation spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} + >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') >>> - >>> response = client.create_dataset(parent, dataset) + >>> response = client.get_annotation_spec(name) Args: - parent (str): Required. The resource name of the project to create the dataset for. - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` + name (str): Required. The resource name of the annotation spec to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1019,7 +940,7 @@ def create_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1029,22 +950,22 @@ def create_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "create_dataset" not in self._inner_api_calls: + if "get_annotation_spec" not in self._inner_api_calls: self._inner_api_calls[ - "create_dataset" + "get_annotation_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_dataset, - default_retry=self._method_configs["CreateDataset"].retry, - default_timeout=self._method_configs["CreateDataset"].timeout, + self.transport.get_annotation_spec, + default_retry=self._method_configs["GetAnnotationSpec"].retry, + default_timeout=self._method_configs["GetAnnotationSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) + request = service_pb2.GetAnnotationSpecRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -1053,31 +974,36 @@ def create_dataset( ) metadata.append(routing_metadata) - return self._inner_api_calls["create_dataset"]( + return self._inner_api_calls["get_annotation_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def get_dataset( + def get_table_spec( self, name, + field_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a dataset. + Gets a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') + >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') >>> - >>> response = client.get_dataset(name) + >>> response = client.get_table_spec(name) Args: - name (str): Required. The resource name of the dataset to retrieve. + name (str): Required. The resource name of the table spec to retrieve. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1088,7 +1014,7 @@ def get_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1098,17 +1024,17 @@ def get_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_dataset" not in self._inner_api_calls: + if "get_table_spec" not in self._inner_api_calls: self._inner_api_calls[ - "get_dataset" + "get_table_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_dataset, - default_retry=self._method_configs["GetDataset"].retry, - default_timeout=self._method_configs["GetDataset"].timeout, - client_info=self._client_info, + self.transport.get_table_spec, + default_retry=self._method_configs["GetTableSpec"].retry, + default_timeout=self._method_configs["GetTableSpec"].timeout, + client_info=self._client_info, ) - request = service_pb2.GetDatasetRequest(name=name,) + request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1122,13 +1048,14 @@ def get_dataset( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_dataset"]( + return self._inner_api_calls["get_table_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def list_datasets( + def list_table_specs( self, parent, + field_mask=None, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -1136,17 +1063,17 @@ def list_datasets( metadata=None, ): """ - Lists datasets in a project. 
+ Lists table specs in a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') + >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # Iterate over all results - >>> for element in client.list_datasets(parent): + >>> for element in client.list_table_specs(parent): ... # process element ... pass >>> @@ -1154,21 +1081,18 @@ def list_datasets( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_datasets(parent).pages: + >>> for page in client.list_table_specs(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Required. The resource name of the project from which to list datasets. - filter_ (str): An expression for filtering the results of the request. - - - ``dataset_metadata`` - for existence of the case (e.g. - image_classification_dataset_metadata:*). Some examples of using the - filter are: + parent (str): Required. The resource name of the dataset to list table specs from. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - ``translation_dataset_metadata:*`` --> The dataset has - translation_dataset_metadata. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + filter_ (str): Filter expression, see go/filtering. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -1185,7 +1109,7 @@ def list_datasets( Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. + An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. 
You can also iterate over the pages of the response using its `pages` property. @@ -1197,18 +1121,18 @@ def list_datasets( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_datasets" not in self._inner_api_calls: + if "list_table_specs" not in self._inner_api_calls: self._inner_api_calls[ - "list_datasets" + "list_table_specs" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_datasets, - default_retry=self._method_configs["ListDatasets"].retry, - default_timeout=self._method_configs["ListDatasets"].timeout, + self.transport.list_table_specs, + default_retry=self._method_configs["ListTableSpecs"].retry, + default_timeout=self._method_configs["ListTableSpecs"].timeout, client_info=self._client_info, ) - request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size, + request = service_pb2.ListTableSpecsRequest( + parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1226,44 +1150,44 @@ def list_datasets( iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls["list_datasets"], + self._inner_api_calls["list_table_specs"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, - items_field="datasets", + items_field="table_specs", request_token_field="page_token", response_token_field="next_page_token", ) return iterator - def update_dataset( + def update_table_spec( self, - dataset, + table_spec, update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a dataset. + Updates a table spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} + >>> # TODO: Initialize `table_spec`: + >>> table_spec = {} >>> - >>> response = client.update_dataset(dataset) + >>> response = client.update_table_spec(table_spec) Args: - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset which replaces the resource on the server. + table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): Required. The table spec which replaces the resource on the server. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` + message :class:`~google.cloud.automl_v1beta1.types.TableSpec` update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. If a dict is provided, it must be of the same form as the protobuf @@ -1278,7 +1202,7 @@ def update_dataset( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1288,93 +1212,24 @@ def update_dataset( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "update_dataset" not in self._inner_api_calls: + if "update_table_spec" not in self._inner_api_calls: self._inner_api_calls[ - "update_dataset" + "update_table_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, + self.transport.update_table_spec, + default_retry=self._method_configs["UpdateTableSpec"].retry, + default_timeout=self._method_configs["UpdateTableSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("dataset.name", dataset.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata + request = service_pb2.UpdateTableSpecRequest( + table_spec=table_spec, update_mask=update_mask, ) - - def get_annotation_spec( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets an annotation spec. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') - >>> - >>> response = client.get_annotation_spec(name) - - Args: - name (str): Required. The resource name of the annotation spec to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_annotation_spec" not in self._inner_api_calls: - self._inner_api_calls[ - "get_annotation_spec" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_annotation_spec, - default_retry=self._method_configs["GetAnnotationSpec"].retry, - default_timeout=self._method_configs["GetAnnotationSpec"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.GetAnnotationSpecRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("table_spec.name", table_spec.name)] except AttributeError: pass else: @@ -1383,11 +1238,11 @@ def get_annotation_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_annotation_spec"]( + return self._inner_api_calls["update_table_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def get_table_spec( + def get_column_spec( self, name, field_mask=None, @@ -1396,19 +1251,19 @@ def get_table_spec( metadata=None, ): """ - Gets a table spec. + Gets a column spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') + >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') >>> - >>> response = client.get_table_spec(name) + >>> response = client.get_column_spec(name) Args: - name (str): Required. The resource name of the table spec to retrieve. + name (str): Required. The resource name of the column spec to retrieve. field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. If a dict is provided, it must be of the same form as the protobuf @@ -1423,7 +1278,7 @@ def get_table_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1433,17 +1288,17 @@ def get_table_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_table_spec" not in self._inner_api_calls: + if "get_column_spec" not in self._inner_api_calls: self._inner_api_calls[ - "get_table_spec" + "get_column_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table_spec, - default_retry=self._method_configs["GetTableSpec"].retry, - default_timeout=self._method_configs["GetTableSpec"].timeout, + self.transport.get_column_spec, + default_retry=self._method_configs["GetColumnSpec"].retry, + default_timeout=self._method_configs["GetColumnSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask,) + request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1457,11 +1312,11 @@ def get_table_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_table_spec"]( + return self._inner_api_calls["get_column_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def list_table_specs( + def list_column_specs( self, parent, field_mask=None, @@ -1472,17 +1327,17 @@ def list_table_specs( metadata=None, ): """ - Lists table specs in a dataset. + Lists column specs in a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') + >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') >>> >>> # Iterate over all results - >>> for element in client.list_table_specs(parent): + >>> for element in client.list_column_specs(parent): ... # process element ... pass >>> @@ -1490,13 +1345,13 @@ def list_table_specs( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_table_specs(parent).pages: + >>> for page in client.list_column_specs(parent).pages: ... for element in page: ... # process element ... 
pass Args: - parent (str): Required. The resource name of the dataset to list table specs from. + parent (str): Required. The resource name of the table spec to list column specs from. field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. If a dict is provided, it must be of the same form as the protobuf @@ -1518,7 +1373,7 @@ def list_table_specs( Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. + An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. You can also iterate over the pages of the response using its `pages` property. @@ -1530,17 +1385,17 @@ def list_table_specs( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_table_specs" not in self._inner_api_calls: + if "list_column_specs" not in self._inner_api_calls: self._inner_api_calls[ - "list_table_specs" + "list_column_specs" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_table_specs, - default_retry=self._method_configs["ListTableSpecs"].retry, - default_timeout=self._method_configs["ListTableSpecs"].timeout, + self.transport.list_column_specs, + default_retry=self._method_configs["ListColumnSpecs"].retry, + default_timeout=self._method_configs["ListColumnSpecs"].timeout, client_info=self._client_info, ) - request = service_pb2.ListTableSpecsRequest( + request = service_pb2.ListColumnSpecsRequest( parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, ) if metadata is None: @@ -1559,44 +1414,44 @@ def list_table_specs( iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls["list_table_specs"], + self._inner_api_calls["list_column_specs"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, - items_field="table_specs", + 
items_field="column_specs", request_token_field="page_token", response_token_field="next_page_token", ) return iterator - def update_table_spec( + def update_column_spec( self, - table_spec, + column_spec, update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a table spec. + Updates a column spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `table_spec`: - >>> table_spec = {} + >>> # TODO: Initialize `column_spec`: + >>> column_spec = {} >>> - >>> response = client.update_table_spec(table_spec) + >>> response = client.update_column_spec(column_spec) Args: - table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): Required. The table spec which replaces the resource on the server. + column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): Required. The column spec which replaces the resource on the server. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.TableSpec` + message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. If a dict is provided, it must be of the same form as the protobuf @@ -1611,7 +1466,7 @@ def update_table_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1621,24 +1476,24 @@ def update_table_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "update_table_spec" not in self._inner_api_calls: + if "update_column_spec" not in self._inner_api_calls: self._inner_api_calls[ - "update_table_spec" + "update_column_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_table_spec, - default_retry=self._method_configs["UpdateTableSpec"].retry, - default_timeout=self._method_configs["UpdateTableSpec"].timeout, + self.transport.update_column_spec, + default_retry=self._method_configs["UpdateColumnSpec"].retry, + default_timeout=self._method_configs["UpdateColumnSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateTableSpecRequest( - table_spec=table_spec, update_mask=update_mask, + request = service_pb2.UpdateColumnSpecRequest( + column_spec=column_spec, update_mask=update_mask, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("table_spec.name", table_spec.name)] + routing_header = [("column_spec.name", column_spec.name)] except AttributeError: pass else: @@ -1647,36 +1502,51 @@ def update_table_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_table_spec"]( + return self._inner_api_calls["update_column_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def get_column_spec( + def create_model( self, - name, - field_mask=None, + parent, + model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a column spec. + Creates a model. Returns a Model in the ``response`` field when it + completes. When you create a model, several model evaluations are + created for it: a global evaluation, and one evaluation for each + annotation spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> response = client.get_column_spec(name) + >>> # TODO: Initialize `model`: + >>> model = {} + >>> + >>> response = client.create_model(parent, model) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): Required. The resource name of the column spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + parent (str): Required. Resource name of the parent project where the model is being created. + model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): Required. The model to create. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1687,7 +1557,7 @@ def get_column_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1697,22 +1567,22 @@ def get_column_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_column_spec" not in self._inner_api_calls: + if "create_model" not in self._inner_api_calls: self._inner_api_calls[ - "get_column_spec" + "create_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_column_spec, - default_retry=self._method_configs["GetColumnSpec"].retry, - default_timeout=self._method_configs["GetColumnSpec"].timeout, + self.transport.create_model, + default_retry=self._method_configs["CreateModel"].retry, + default_timeout=self._method_configs["CreateModel"].timeout, client_info=self._client_info, ) - request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask,) + request = service_pb2.CreateModelRequest(parent=parent, model=model,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -1721,56 +1591,37 @@ def get_column_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_column_spec"]( + operation = self._inner_api_calls["create_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + model_pb2.Model, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_column_specs( + def get_model( self, - parent, - field_mask=None, - filter_=None, - page_size=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists column specs in a table spec. + Gets a model. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') - >>> - >>> # Iterate over all results - >>> for element in client.list_column_specs(parent): - ... # process element - ... 
pass - >>> - >>> - >>> # Alternatively: + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_column_specs(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> response = client.get_model(name) Args: - parent (str): Required. The resource name of the table spec to list column specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1781,10 +1632,7 @@ def list_column_specs( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1794,24 +1642,22 @@ def list_column_specs( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_column_specs" not in self._inner_api_calls: + if "get_model" not in self._inner_api_calls: self._inner_api_calls[ - "list_column_specs" + "get_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_column_specs, - default_retry=self._method_configs["ListColumnSpecs"].retry, - default_timeout=self._method_configs["ListColumnSpecs"].timeout, + self.transport.get_model, + default_retry=self._method_configs["GetModel"].retry, + default_timeout=self._method_configs["GetModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ListColumnSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, - ) + request = service_pb2.GetModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -1820,51 +1666,61 @@ def list_column_specs( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_column_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="column_specs", - request_token_field="page_token", - response_token_field="next_page_token", + return self._inner_api_calls["get_model"]( + request, retry=retry, timeout=timeout, metadata=metadata ) - return iterator - def update_column_spec( + def list_models( self, - column_spec, - update_mask=None, + parent, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a column spec. + Lists models. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `column_spec`: - >>> column_spec = {} + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> response = client.update_column_spec(column_spec) + >>> # Iterate over all results + >>> for element in client.list_models(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_models(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): Required. The column spec which replaces the resource on the server. + parent (str): Required. Resource name of the project, from which to list the models. + filter_ (str): An expression for filtering the results of the request. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + - ``model_metadata`` - for existence of the case (e.g. + video_classification_model_metadata:*). - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + - ``dataset_id`` - for = or !=. Some examples of using the filter are: + + - ``image_classification_model_metadata:*`` --> The model has + image_classification_model_metadata. + + - ``dataset_id=5`` --> The model was created from a dataset with ID 5. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. 
retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1875,7 +1731,10 @@ def update_column_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1885,24 +1744,24 @@ def update_column_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_column_spec" not in self._inner_api_calls: + if "list_models" not in self._inner_api_calls: self._inner_api_calls[ - "update_column_spec" + "list_models" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_column_spec, - default_retry=self._method_configs["UpdateColumnSpec"].retry, - default_timeout=self._method_configs["UpdateColumnSpec"].timeout, + self.transport.list_models, + default_retry=self._method_configs["ListModels"].retry, + default_timeout=self._method_configs["ListModels"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateColumnSpecRequest( - column_spec=column_spec, update_mask=update_mask, + request = service_pb2.ListModelsRequest( + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("column_spec.name", column_spec.name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -1911,35 +1770,41 @@ def update_column_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_column_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata + iterator = 
google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_models"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="model", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def create_model( + def delete_model( self, - parent, - model, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a model. Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `model`: - >>> model = {} + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.create_model(parent, model) + >>> response = client.delete_model(name) >>> >>> def callback(operation_future): ... # Handle result. @@ -1951,11 +1816,7 @@ def create_model( >>> metadata = response.metadata() Args: - parent (str): Required. Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): Required. The model to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Model` + name (str): Required. Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
@@ -1976,22 +1837,22 @@ def create_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "create_model" not in self._inner_api_calls: + if "delete_model" not in self._inner_api_calls: self._inner_api_calls[ - "create_model" + "delete_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_model, - default_retry=self._method_configs["CreateModel"].retry, - default_timeout=self._method_configs["CreateModel"].timeout, + self.transport.delete_model, + default_retry=self._method_configs["DeleteModel"].retry, + default_timeout=self._method_configs["DeleteModel"].timeout, client_info=self._client_info, ) - request = service_pb2.CreateModelRequest(parent=parent, model=model,) + request = service_pb2.DeleteModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2000,25 +1861,38 @@ def create_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["create_model"]( + operation = self._inner_api_calls["delete_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, - model_pb2.Model, + empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) - def get_model( + def deploy_model( self, name, + image_object_detection_model_deployment_metadata=None, + image_classification_model_deployment_metadata=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a model. + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing + + ``node_number``) will reset the deployment state without pausing the + model's availability. 
+ + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 @@ -2027,10 +1901,27 @@ def get_model( >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.get_model(name) + >>> response = client.deploy_model(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): Required. Resource name of the model. + name (str): Required. Resource name of the model to deploy. + image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` + image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2041,7 +1932,7 @@ def get_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Model` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2051,17 +1942,28 @@ def get_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_model" not in self._inner_api_calls: + if "deploy_model" not in self._inner_api_calls: self._inner_api_calls[ - "get_model" + "deploy_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model, - default_retry=self._method_configs["GetModel"].retry, - default_timeout=self._method_configs["GetModel"].timeout, + self.transport.deploy_model, + default_retry=self._method_configs["DeployModel"].retry, + default_timeout=self._method_configs["DeployModel"].timeout, client_info=self._client_info, ) - request = service_pb2.GetModelRequest(name=name,) + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. + google.api_core.protobuf_helpers.check_oneof( + image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, + image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + ) + + request = service_pb2.DeployModelRequest( + name=name, + image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, + image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2075,61 +1977,52 @@ def get_model( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_model"]( + operation = self._inner_api_calls["deploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_models( + def undeploy_model( self, - parent, - filter_=None, - page_size=None, + name, 
retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists models. + Undeploys a model. If the model is not deployed this method has no + effect. + + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # Iterate over all results - >>> for element in client.list_models(parent): - ... # process element - ... pass + >>> response = client.undeploy_model(name) >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() >>> - >>> # Alternatively: + >>> response.add_done_callback(callback) >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_models(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): Required. Resource name of the project, from which to list the models. - filter_ (str): An expression for filtering the results of the request. - - - ``model_metadata`` - for existence of the case (e.g. - video_classification_model_metadata:*). - - - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - - ``image_classification_model_metadata:*`` --> The model has - image_classification_model_metadata. - - - ``dataset_id=5`` --> The model was created from a dataset with ID 5. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. 
If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2140,10 +2033,7 @@ def list_models( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2153,24 +2043,22 @@ def list_models( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_models" not in self._inner_api_calls: + if "undeploy_model" not in self._inner_api_calls: self._inner_api_calls[ - "list_models" + "undeploy_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_models, - default_retry=self._method_configs["ListModels"].retry, - default_timeout=self._method_configs["ListModels"].timeout, + self.transport.undeploy_model, + default_retry=self._method_configs["UndeployModel"].retry, + default_timeout=self._method_configs["UndeployModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size, - ) + request = service_pb2.UndeployModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2179,41 +2067,30 @@ def list_models( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( 
- self._inner_api_calls["list_models"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="model", - request_token_field="page_token", - response_token_field="next_page_token", + operation = self._inner_api_calls["undeploy_model"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, ) - return iterator - def deploy_model( + def export_model( self, name, - image_object_detection_model_deployment_metadata=None, - image_classification_model_deployment_metadata=None, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deploys a model. If a model is already deployed, deploying it with - the same parameters has no effect. Deploying with different parametrs - (as e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in - Only applicable for Text Classification, Image Object Detection , - Tables, and Image Segmentation; all other domains manage deployment - automatically. + ``ModelExportOutputConfig``. Returns an empty response in the ``response`` field when it completes. @@ -2224,7 +2101,10 @@ def deploy_model( >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.deploy_model(name) + >>> # TODO: Initialize `output_config`: + >>> output_config = {} + >>> + >>> response = client.export_model(name, output_config) >>> >>> def callback(operation_future): ... # Handle result. @@ -2236,15 +2116,11 @@ def deploy_model( >>> metadata = response.metadata() Args: - name (str): Required. 
Resource name of the model to deploy. - image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` - image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. + name (str): Required. The resource name of the model to export. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` + message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2265,27 +2141,18 @@ def deploy_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "deploy_model" not in self._inner_api_calls: + if "export_model" not in self._inner_api_calls: self._inner_api_calls[ - "deploy_model" + "export_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.deploy_model, - default_retry=self._method_configs["DeployModel"].retry, - default_timeout=self._method_configs["DeployModel"].timeout, + self.transport.export_model, + default_retry=self._method_configs["ExportModel"].retry, + default_timeout=self._method_configs["ExportModel"].timeout, client_info=self._client_info, ) - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - - request = service_pb2.DeployModelRequest( - name=name, - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + request = service_pb2.ExportModelRequest( + name=name, output_config=output_config, ) if metadata is None: metadata = [] @@ -2300,7 +2167,7 @@ def deploy_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["deploy_model"]( + operation = self._inner_api_calls["export_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( @@ -2310,19 +2177,25 @@ def deploy_model( metadata_type=proto_operations_pb2.OperationMetadata, ) - def undeploy_model( + def export_evaluated_examples( self, name, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Undeploys a model. If the model is not deployed this method has no - effect. 
+ Exports examples on which the model was evaluated (i.e. which were + in the TEST set of the dataset the model was created from), together + with their ground truth annotations and the annotations created + (predicted) by the model. The examples, ground truth and predictions are + exported in the state they were at the moment the model was evaluated. - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. + This export is available only for 30 days since the model evaluation is + created. + + Currently only available for Tables. Returns an empty response in the ``response`` field when it completes. @@ -2333,7 +2206,10 @@ def undeploy_model( >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.undeploy_model(name) + >>> # TODO: Initialize `output_config`: + >>> output_config = {} + >>> + >>> response = client.export_evaluated_examples(name, output_config) >>> >>> def callback(operation_future): ... # Handle result. @@ -2345,7 +2221,12 @@ def undeploy_model( >>> metadata = response.metadata() Args: - name (str): Required. Resource name of the model to undeploy. + name (str): Required. The resource name of the model whose evaluated examples are to + be exported. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2366,17 +2247,19 @@ def undeploy_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "undeploy_model" not in self._inner_api_calls: + if "export_evaluated_examples" not in self._inner_api_calls: self._inner_api_calls[ - "undeploy_model" + "export_evaluated_examples" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.undeploy_model, - default_retry=self._method_configs["UndeployModel"].retry, - default_timeout=self._method_configs["UndeployModel"].timeout, + self.transport.export_evaluated_examples, + default_retry=self._method_configs["ExportEvaluatedExamples"].retry, + default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, client_info=self._client_info, ) - request = service_pb2.UndeployModelRequest(name=name,) + request = service_pb2.ExportEvaluatedExamplesRequest( + name=name, output_config=output_config, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2390,7 +2273,7 @@ def undeploy_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["undeploy_model"]( + operation = self._inner_api_calls["export_evaluated_examples"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( @@ -2468,3 +2351,120 @@ def get_model_evaluation( return self._inner_api_calls["get_model_evaluation"]( request, retry=retry, timeout=timeout, metadata=metadata ) + + def list_model_evaluations( + self, + parent, + filter_=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists model evaluations. + + Example: + >>> from google.cloud import automl_v1beta1 + >>> + >>> client = automl_v1beta1.AutoMlClient() + >>> + >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> + >>> # Iterate over all results + >>> for element in client.list_model_evaluations(parent): + ... # process element + ... 
pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_model_evaluations(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. Resource name of the model to list the model evaluations for. + If modelId is set as "-", this will list model evaluations from across all + models of the parent location. + filter_ (str): An expression for filtering the results of the request. + + - ``annotation_spec_id`` - for =, != or existence. See example below + for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was done for + annotation spec with ID different than 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation was done for + aggregate of all annotation specs. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "list_model_evaluations" not in self._inner_api_calls: + self._inner_api_calls[ + "list_model_evaluations" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_model_evaluations, + default_retry=self._method_configs["ListModelEvaluations"].retry, + default_timeout=self._method_configs["ListModelEvaluations"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.ListModelEvaluationsRequest( + parent=parent, filter=filter_, page_size=page_size, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_model_evaluations"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="model_evaluation", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py index 79d5b6ee..7319dbad 100644 --- a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py +++ b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py @@ -2,140 +2,159 @@ "interfaces": { "google.cloud.automl.v1beta1.AutoMl": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], + "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], + "no_retry_2_codes": [], + "no_retry_codes": [], }, "retry_params": { - "default": { + 
"retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 5000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_2_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 5000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, }, "methods": { - "DeleteDataset": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ImportData": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ExportData": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteModel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ExportModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ExportEvaluatedExamples": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListModelEvaluations": { - "timeout_millis": 50000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, "CreateDataset": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + 
"retry_params_name": "no_retry_2_params", }, "GetDataset": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListDatasets": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "UpdateDataset": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", + }, + "DeleteDataset": { + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "ImportData": { + "timeout_millis": 20000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", + }, + "ExportData": { + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "GetAnnotationSpec": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "GetTableSpec": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListTableSpecs": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "UpdateTableSpec": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + 
"timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "GetColumnSpec": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListColumnSpecs": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "UpdateColumnSpec": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "CreateModel": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "GetModel": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListModels": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "DeleteModel": { + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "DeployModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "UndeployModel": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "timeout_millis": 
5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", + }, + "ExportModel": { + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", + }, + "ExportEvaluatedExamples": { + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "GetModelEvaluation": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "ListModelEvaluations": { + "timeout_millis": 50000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, }, } diff --git a/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py b/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py index d93ca92f..76c85878 100644 --- a/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py +++ b/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py @@ -1,31 +1,37 @@ config = { "interfaces": { "google.cloud.automl.v1beta1.PredictionService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, + "retry_codes": {"no_retry_codes": [], "no_retry_1_codes": []}, "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 60000, - 
"total_timeout_millis": 600000, - } + "total_timeout_millis": 60000, + }, }, "methods": { "Predict": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "BatchPredict": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, }, } diff --git a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py index 5a8e1b5d..8dcd0ec1 100644 --- a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py +++ b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py @@ -116,176 +116,105 @@ def channel(self): return self._channel @property - def delete_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - - Deletes a dataset and all of its contents. Returns empty response in - the ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].DeleteDataset - - @property - def import_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A ``schema_inference_version`` parameter must be explicitly set. - Returns an empty response in the ``response`` field when it - completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["auto_ml_stub"].ImportData - - @property - def export_data(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - - Exports dataset's data to the provided output location. Returns an - empty response in the ``response`` field when it completes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].ExportData - - @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + def create_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. - Deletes a model. Returns ``google.protobuf.Empty`` in the - ``response`` field when it completes, and ``delete_details`` in the - ``metadata`` field. + Creates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeleteModel + return self._stubs["auto_ml_stub"].CreateDataset @property - def export_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - - Exports a trained, "export-able", model to a user specified Google - Cloud Storage location. A model is considered export-able if and only if - it has an export format defined for it in - - ``ModelExportOutputConfig``. + def get_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. - Returns an empty response in the ``response`` field when it completes. + Gets a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportModel + return self._stubs["auto_ml_stub"].GetDataset @property - def export_evaluated_examples(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`. 
- - Exports examples on which the model was evaluated (i.e. which were - in the TEST set of the dataset the model was created from), together - with their ground truth annotations and the annotations created - (predicted) by the model. The examples, ground truth and predictions are - exported in the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. - - Currently only available for Tables. + def list_datasets(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. - Returns an empty response in the ``response`` field when it completes. + Lists datasets in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportEvaluatedExamples + return self._stubs["auto_ml_stub"].ListDatasets @property - def list_model_evaluations(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. + def update_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - Lists model evaluations. + Updates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListModelEvaluations + return self._stubs["auto_ml_stub"].UpdateDataset @property - def create_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_dataset`. + def delete_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - Creates a dataset. + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
""" - return self._stubs["auto_ml_stub"].CreateDataset + return self._stubs["auto_ml_stub"].DeleteDataset @property - def get_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. - - Gets a dataset. + def import_data(self): + """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["auto_ml_stub"].GetDataset + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. - @property - def list_datasets(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. + For Tables: - Lists datasets in a project. + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListDatasets + return self._stubs["auto_ml_stub"].ImportData @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. + def export_data(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - Updates a dataset. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateDataset + return self._stubs["auto_ml_stub"].ExportData @property def get_annotation_spec(self): @@ -420,6 +349,21 @@ def list_models(self): """ return self._stubs["auto_ml_stub"].ListModels + @property + def delete_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + + Deletes a model. 
Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["auto_ml_stub"].DeleteModel + @property def deploy_model(self): """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. @@ -463,6 +407,49 @@ def undeploy_model(self): """ return self._stubs["auto_ml_stub"].UndeployModel + @property + def export_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_model`. + + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in + + ``ModelExportOutputConfig``. + + Returns an empty response in the ``response`` field when it completes. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["auto_ml_stub"].ExportModel + + @property + def export_evaluated_examples(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`. + + Exports examples on which the model was evaluated (i.e. which were + in the TEST set of the dataset the model was created from), together + with their ground truth annotations and the annotations created + (predicted) by the model. The examples, ground truth and predictions are + exported in the state they were at the moment the model was evaluated. + + This export is available only for 30 days since the model evaluation is + created. + + Currently only available for Tables. + + Returns an empty response in the ``response`` field when it completes. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["auto_ml_stub"].ExportEvaluatedExamples + @property def get_model_evaluation(self): """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`. @@ -475,3 +462,16 @@ def get_model_evaluation(self): deserialized response object. """ return self._stubs["auto_ml_stub"].GetModelEvaluation + + @property + def list_model_evaluations(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. + + Lists model evaluations. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["auto_ml_stub"].ListModelEvaluations diff --git a/synth.metadata b/synth.metadata index a6dc5e61..54dcde69 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "dd25bfd61bce9a24c7a7793af4fb7ac61e3f6542", - "internalRef": "317760936" + "sha": "5b85137bf6fb01dcf8a949a6a04eee6ed0c22bec", + "internalRef": "317760971" } }, { diff --git a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py index 9438e361..87d1fe03 100644 --- a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py +++ b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py @@ -71,67 +71,76 @@ class CustomException(Exception): class TestAutoMlClient(object): - def test_delete_dataset(self): + def test_create_dataset(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_dataset", done=True - ) - operation.response.Pack(expected_response) + name = "name3373707" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + 
"description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + parent = client.location_path("[PROJECT]", "[LOCATION]") + dataset = {} - response = client.delete_dataset(name) - result = response.result() - assert expected_response == result + response = client.create_dataset(parent, dataset) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteDatasetRequest(name=name) + expected_request = service_pb2.CreateDatasetRequest( + parent=parent, dataset=dataset + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_dataset_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_dataset_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_create_dataset_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + # Setup request + parent = client.location_path("[PROJECT]", "[LOCATION]") + dataset = {} - response = client.delete_dataset(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + 
client.create_dataset(parent, dataset) - def test_import_data(self): + def test_get_dataset(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_import_data", done=True - ) - operation.response.Pack(expected_response) + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name_2, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel @@ -139,152 +148,126 @@ def test_import_data(self): # Setup Request name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - input_config = {} - response = client.import_data(name, input_config) - result = response.result() - assert expected_response == result + response = client.get_dataset(name) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.ImportDataRequest( - name=name, input_config=input_config - ) + expected_request = service_pb2.GetDatasetRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_import_data_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_import_data_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_get_dataset_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = 
ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request + # Setup request name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - input_config = {} - response = client.import_data(name, input_config) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.get_dataset(name) - def test_export_data(self): + def test_list_datasets(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_data", done=True - ) - operation.response.Pack(expected_response) + next_page_token = "" + datasets_element = {} + datasets = [datasets_element] + expected_response = {"next_page_token": next_page_token, "datasets": datasets} + expected_response = service_pb2.ListDatasetsResponse(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - output_config = {} + parent = client.location_path("[PROJECT]", "[LOCATION]") - response = client.export_data(name, output_config) - result = response.result() - assert expected_response == result + paged_list_response = client.list_datasets(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.datasets[0] == resources[0] assert len(channel.requests) == 1 - expected_request = service_pb2.ExportDataRequest( - name=name, output_config=output_config - ) + expected_request = 
service_pb2.ListDatasetsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_export_data_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_data_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) + def test_list_datasets_exception(self): + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - output_config = {} + # Setup request + parent = client.location_path("[PROJECT]", "[LOCATION]") - response = client.export_data(name, output_config) - exception = response.exception() - assert exception.errors[0] == error + paged_list_response = client.list_datasets(parent) + with pytest.raises(CustomException): + list(paged_list_response) - def test_delete_model(self): + def test_update_dataset(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True - ) - operation.response.Pack(expected_response) + name = "name3373707" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + dataset = {} - response = client.delete_model(name) - result = response.result() - assert expected_response == result + response = client.update_dataset(dataset) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) + expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_update_dataset_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + dataset = {} - response = client.delete_model(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.update_dataset(dataset) - def test_export_model(self): + def test_delete_dataset(self): # Setup Expected Response expected_response = {} expected_response = empty_pb2.Empty(**expected_response) operation = operations_pb2.Operation( - name="operations/test_export_model", done=True + name="operations/test_delete_dataset", done=True ) operation.response.Pack(expected_response) @@ -296,25 +279,22 @@ def test_export_model(self): client = 
automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - response = client.export_model(name, output_config) + response = client.delete_dataset(name) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.ExportModelRequest( - name=name, output_config=output_config - ) + expected_request = service_pb2.DeleteDatasetRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_export_model_exception(self): + def test_delete_dataset_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_export_model_exception", done=True + name="operations/test_delete_dataset_exception", done=True ) operation.error.CopyFrom(error) @@ -326,19 +306,18 @@ def test_export_model_exception(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - response = client.export_model(name, output_config) + response = client.delete_dataset(name) exception = response.exception() assert exception.errors[0] == error - def test_export_evaluated_examples(self): + def test_import_data(self): # Setup Expected Response expected_response = {} expected_response = empty_pb2.Empty(**expected_response) operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples", done=True + name="operations/test_import_data", done=True ) operation.response.Pack(expected_response) @@ -350,25 +329,25 @@ def test_export_evaluated_examples(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + name = 
client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + input_config = {} - response = client.export_evaluated_examples(name, output_config) + response = client.import_data(name, input_config) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config + expected_request = service_pb2.ImportDataRequest( + name=name, input_config=input_config ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_export_evaluated_examples_exception(self): + def test_import_data_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples_exception", done=True + name="operations/test_import_data_exception", done=True ) operation.error.CopyFrom(error) @@ -380,251 +359,66 @@ def test_export_evaluated_examples_exception(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - - response = client.export_evaluated_examples(name, output_config) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_model_evaluations(self): - # Setup Expected Response - next_page_token = "" - model_evaluation_element = {} - model_evaluation = [model_evaluation_element] - expected_response = { - "next_page_token": next_page_token, - "model_evaluation": model_evaluation, - } - expected_response = service_pb2.ListModelEvaluationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - 
paged_list_response = client.list_model_evaluations(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.model_evaluation[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_model_evaluations_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - - paged_list_response = client.list_model_evaluations(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_create_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - response = client.create_dataset(parent, dataset) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.CreateDatasetRequest( - parent=parent, dataset=dataset - ) - actual_request = channel.requests[0][1] - assert expected_request == 
actual_request - - def test_create_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - dataset = {} - - with pytest.raises(CustomException): - client.create_dataset(parent, dataset) - - def test_get_dataset(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name_2, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - response = client.get_dataset(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.GetDatasetRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - - with pytest.raises(CustomException): - 
client.get_dataset(name) - - def test_list_datasets(self): - # Setup Expected Response - next_page_token = "" - datasets_element = {} - datasets = [datasets_element] - expected_response = {"next_page_token": next_page_token, "datasets": datasets} - expected_response = service_pb2.ListDatasetsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.datasets[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = service_pb2.ListDatasetsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_datasets_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") - - paged_list_response = client.list_datasets(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) + name = 
client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + input_config = {} + + response = client.import_data(name, input_config) + exception = response.exception() + assert exception.errors[0] == error + + def test_export_data(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_export_data", done=True + ) + operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - dataset = {} + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + output_config = {} - response = client.update_dataset(dataset) - assert expected_response == response + response = client.export_data(name, output_config) + result = response.result() + assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset) + expected_request = service_pb2.ExportDataRequest( + name=name, output_config=output_config + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_update_dataset_exception(self): + def test_export_data_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_export_data_exception", done=True + ) + operation.error.CopyFrom(error) + # Mock the API response - channel = ChannelStub(responses=[CustomException()]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup request - dataset = {} + # 
Setup Request + name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") + output_config = {} - with pytest.raises(CustomException): - client.update_dataset(dataset) + response = client.export_data(name, output_config) + exception = response.exception() + assert exception.errors[0] == error def test_get_annotation_spec(self): # Setup Expected Response @@ -1104,6 +898,56 @@ def test_list_models_exception(self): with pytest.raises(CustomException): list(paged_list_response) + def test_delete_model(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_delete_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = client.delete_model(name) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.DeleteModelRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_delete_model_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + response = 
client.delete_model(name) + exception = response.exception() + assert exception.errors[0] == error + def test_deploy_model(self): # Setup Expected Response expected_response = {} @@ -1204,6 +1048,114 @@ def test_undeploy_model_exception(self): exception = response.exception() assert exception.errors[0] == error + def test_export_model(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_export_model", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} + + response = client.export_model(name, output_config) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.ExportModelRequest( + name=name, output_config=output_config + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_export_model_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_export_model_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} + + response = client.export_model(name, output_config) + exception = response.exception() + assert 
exception.errors[0] == error + + def test_export_evaluated_examples(self): + # Setup Expected Response + expected_response = {} + expected_response = empty_pb2.Empty(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_export_evaluated_examples", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} + + response = client.export_evaluated_examples(name, output_config) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = service_pb2.ExportEvaluatedExamplesRequest( + name=name, output_config=output_config + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_export_evaluated_examples_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_export_evaluated_examples_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + output_config = {} + + response = client.export_evaluated_examples(name, output_config) + exception = response.exception() + assert exception.errors[0] == error + def test_get_model_evaluation(self): # Setup Expected Response name_2 = "name2-1052831874" @@ -1253,3 +1205,51 @@ def test_get_model_evaluation_exception(self): with 
pytest.raises(CustomException): client.get_model_evaluation(name) + + def test_list_model_evaluations(self): + # Setup Expected Response + next_page_token = "" + model_evaluation_element = {} + model_evaluation = [model_evaluation_element] + expected_response = { + "next_page_token": next_page_token, + "model_evaluation": model_evaluation, + } + expected_response = service_pb2.ListModelEvaluationsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + paged_list_response = client.list_model_evaluations(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.model_evaluation[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_model_evaluations_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup request + parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + + paged_list_response = client.list_model_evaluations(parent) + with pytest.raises(CustomException): + list(paged_list_response) From c6a1bfa46fb2e91f445fb430573d64f6ea0ce48b Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Fri, 26 Jun 2020 04:39:35 +0000 Subject: [PATCH 8/9] docs: add multiprocessing note --- docs/index.rst | 2 + .../cloud/automl_v1/gapic/auto_ml_client.py | 4 +- google/cloud/automl_v1/proto/io_pb2.py | 6 +-- 
google/cloud/automl_v1/proto/service_pb2.py | 4 +- .../automl_v1beta1/gapic/auto_ml_client.py | 4 +- .../cloud/automl_v1beta1/proto/image_pb2.py | 2 +- google/cloud/automl_v1beta1/proto/io_pb2.py | 24 ++---------- .../cloud/automl_v1beta1/proto/service_pb2.py | 4 +- .../cloud/automl_v1beta1/proto/tables_pb2.py | 2 +- noxfile.py | 3 +- synth.py | 37 ++++++++++++++----- 11 files changed, 48 insertions(+), 44 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 90c2bfd5..5473e0d7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,7 @@ .. include:: README.rst +.. include:: multiprocessing.rst + This package includes clients for multiple versions of the Cloud AutoML API. By default, you will get ``v1``, the latest stable version. diff --git a/google/cloud/automl_v1/gapic/auto_ml_client.py b/google/cloud/automl_v1/gapic/auto_ml_client.py index 8cd6ee34..a870b6bf 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client.py @@ -465,7 +465,7 @@ def list_datasets( filter_ (str): An expression for filtering the results of the request. - ``dataset_metadata`` - for existence of the case (e.g. - image_classification_dataset_metadata:*). Some examples of using the + ````image_classification_dataset_metadata````). Some examples of using the filter are: - ``translation_dataset_metadata:*`` --> The dataset has @@ -1173,7 +1173,7 @@ def list_models( filter_ (str): An expression for filtering the results of the request. - ``model_metadata`` - for existence of the case (e.g. - video_classification_model_metadata:*). + ````video_classification_model_metadata:*````). - ``dataset_id`` - for = or !=. Some examples of using the filter are: diff --git a/google/cloud/automl_v1/proto/io_pb2.py b/google/cloud/automl_v1/proto/io_pb2.py index 90f14fea..8a784f96 100644 --- a/google/cloud/automl_v1/proto/io_pb2.py +++ b/google/cloud/automl_v1/proto/io_pb2.py @@ -944,7 +944,7 @@ raw:: html
    "Id","First Name","Last
   Name","Dob","Addresses"     "1","John","Doe","1968-01-22","[{"status":
   "current","address":"123_First_Avenue","city":"Seattle","state":"WA","
-  zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_
+  zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456\_
   Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYea
   rs":"5"}]"     "2","Jane","Doe","1980-10-16","[{"status":"current","ad
   dress":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","nu
@@ -1271,7 +1271,7 @@
   depends on total number of failed predictions). These files will have
   a JSON representation of a proto that wraps the same “ID” : “” but
   here followed by exactly one  ```google.rpc.Status`` `__
+  b.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`_\_
   containing only ``code`` and ``message``\ fields.  -  For Image Object
   Detection: In the created directory files
   ``image_object_detection_1.jsonl``,
@@ -1433,7 +1433,7 @@
   be created (N depends on total number of failed rows). These files
   will have analogous format as ``tables_*.csv``, but always with a
   single target column having  ```google.rpc.Status`` `__
+  .com/googleapis/googleapis/blob/master/google/rpc/status.proto>`_\_
   represented as a JSON string, and containing only ``code`` and
   ``message``. BigQuery case:  [bigquery_destination][google.cloud.autom
   l.v1p1beta.OutputConfig.bigquery_destination] pointing to a BigQuery
diff --git a/google/cloud/automl_v1/proto/service_pb2.py b/google/cloud/automl_v1/proto/service_pb2.py
index 7b6bfcf2..83647c90 100644
--- a/google/cloud/automl_v1/proto/service_pb2.py
+++ b/google/cloud/automl_v1/proto/service_pb2.py
@@ -1488,7 +1488,7 @@
       filter:
           An expression for filtering the results of the request.  -
           ``dataset_metadata`` - for existence of the case (
-          e.g. image_classification_dataset_metadata:*). Some examples
+          e.g. ``image_classification_dataset_metadata``). Some examples
           of using    the filter are:  -
           ``translation_dataset_metadata:*`` –> The dataset has
           translation_dataset_metadata.
@@ -1684,7 +1684,7 @@
       filter:
           An expression for filtering the results of the request.  -
           ``model_metadata`` - for existence of the case (
-          e.g. video_classification_model_metadata:*). -  ``dataset_id``
+          e.g. ``video_classification_model_metadata:*``). -  ``dataset_id``
           - for = or !=. Some examples of using the filter are:  -
           ``image_classification_model_metadata:*`` –> The model has
           image_classification_model_metadata. -  ``dataset_id=5`` –>
diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
index 05169685..3ce313fa 100644
--- a/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
+++ b/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
@@ -475,7 +475,7 @@ def list_datasets(
             filter_ (str): An expression for filtering the results of the request.
 
                 -  ``dataset_metadata`` - for existence of the case (e.g.
-                   image_classification_dataset_metadata:*). Some examples of using the
+                   ``image_classification_dataset_metadata``). Some examples of using the
                    filter are:
 
                 -  ``translation_dataset_metadata:*`` --> The dataset has
@@ -1708,7 +1708,7 @@ def list_models(
             filter_ (str): An expression for filtering the results of the request.
 
                 -  ``model_metadata`` - for existence of the case (e.g.
-                   video_classification_model_metadata:*).
+                   ``video_classification_model_metadata:*``).
 
                 -  ``dataset_id`` - for = or !=. Some examples of using the filter are:
 
diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py
index 73765cfa..6f17f2c5 100644
--- a/google/cloud/automl_v1beta1/proto/image_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/image_pb2.py
@@ -691,7 +691,7 @@
           Input only. The number of nodes to deploy the model on. A node
           is an abstraction of a machine resource, which can handle
           online prediction QPS as given in the model’s  [node_qps][goog
-          le.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_
+          le.cloud.automl.v1beta1.ImageClassificationModelMetadata.node\_
           qps]. Must be between 1 and 100, inclusive on both ends.
   """,
         # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata)
diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py
index 36ca7cd0..13bd8be1 100644
--- a/google/cloud/automl_v1beta1/proto/io_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/io_pb2.py
@@ -1290,23 +1290,7 @@
   wraps a Document proto with    input_config set. Only PDF documents
   are supported now, and each    document must be up to 2MB large. Any
   given .JSONL file must be 100MB    or smaller, and no more than 20
-  files may be given. Sample in-line    JSON Lines file (presented here
-  with artificial line breaks, but the    only actual line break is
-  denoted by :raw-latex:`\n`): { “id”:    “my_first_id”, “text_snippet”:
-  { “content”: “dog car cat”},    “text_features”: [ { “text_segment”:
-  {“start_offset”: 4,    “end_offset”: 6}, “structural_type”: PARAGRAPH,
-  “bounding_poly”: {    “normalized_vertices”: [ {“x”: 0.1, “y”: 0.1},
-  {“x”: 0.1, “y”: 0.3},    {“x”: 0.3, “y”: 0.3}, {“x”: 0.3, “y”: 0.1}, ]
-  }, } ], }:raw-latex:`\n           {             "id": "2",
-  "text_snippet": {               "content": "An elaborate content",
-  "mime_type": "text/plain"             }           }` Sample document
-  JSON Lines file (presented here with    artificial line breaks, but
-  the only actual line break is denoted by    :raw-latex:`\n`).: {
-  “document”: { “input_config”: { “gcs_source”: {    “input_uris”: [
-  “gs://folder/document1.pdf” ] } } } }:raw-latex:`\n           {
-  "document": {               "input_config": {
-  "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
-  }               }             }           }`  -  For Tables: Either
+  files may be given. -  For Tables: Either
   [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or  [
   bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour
   ce]. GCS case: CSV file(s), each by itself 10GB or smaller and total
@@ -1466,7 +1450,7 @@
   depends on total number of failed predictions). These files will have
   a JSON representation of a proto that wraps the same “ID” : “” but
   here followed by exactly one  ```google.rpc.Status`` `__
+  b.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`_\_
   containing only ``code`` and ``message``\ fields.  -  For Image Object
   Detection: In the created directory files
   ``image_object_detection_1.jsonl``,
@@ -1627,7 +1611,7 @@
   be created (N depends on total number of failed rows). These files
   will have analogous format as ``tables_*.csv``, but always with a
   single target column having  ```google.rpc.Status`` `__
+  .com/googleapis/googleapis/blob/master/google/rpc/status.proto>`_\_
   represented as a JSON string, and containing only ``code`` and
   ``message``. BigQuery case:  [bigquery_destination][google.cloud.autom
   l.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery
@@ -1719,7 +1703,7 @@
           mobile devices. -  edgetpu_tflite - Used for `Edge    TPU
           `__ devices. -
           tf_saved_model - A tensorflow model in SavedModel format. -
-          tf_js - A `TensorFlow.js `__
+          tf_js - A `TensorFlow.js `_\_
           model    that can be used in the browser and in Node.js using
           JavaScript. -  docker - Used for Docker containers. Use the
           params field to    customize the container. The container is
diff --git a/google/cloud/automl_v1beta1/proto/service_pb2.py b/google/cloud/automl_v1beta1/proto/service_pb2.py
index 50e77bf1..b8c20941 100644
--- a/google/cloud/automl_v1beta1/proto/service_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/service_pb2.py
@@ -2145,7 +2145,7 @@
       filter:
           An expression for filtering the results of the request.  -
           ``dataset_metadata`` - for existence of the case (
-          e.g. image_classification_dataset_metadata:*). Some examples
+          e.g. ``image_classification_dataset_metadata``). Some examples
           of using    the filter are:  -
           ``translation_dataset_metadata:*`` –> The dataset has
           translation_dataset_metadata.
@@ -2534,7 +2534,7 @@
       filter:
           An expression for filtering the results of the request.  -
           ``model_metadata`` - for existence of the case (
-          e.g. video_classification_model_metadata:*). -  ``dataset_id``
+          e.g. ``video_classification_model_metadata:*``). -  ``dataset_id``
           - for = or !=. Some examples of using the filter are:  -
           ``image_classification_model_metadata:*`` –> The model has
           image_classification_model_metadata. -  ``dataset_id=5`` –>
diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py
index 96fb1098..7335fe3d 100644
--- a/google/cloud/automl_v1beta1/proto/tables_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py
@@ -919,7 +919,7 @@
           nput_feature_column_specs][google.cloud.automl.v1beta1.TablesM
           odelMetadata.input_feature_column_specs] with respect to this
           particular prediction. If no other fields than  [column_spec_n
-          ame][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_
+          ame][google.cloud.automl.v1beta1.TablesModelColumnInfo.column\_
           spec_name] and  [column_display_name][google.cloud.automl.v1be
           ta1.TablesModelColumnInfo.column_display_name] would be
           populated, then this whole field is not.
diff --git a/noxfile.py b/noxfile.py
index 4e919f74..e82d68dc 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -146,12 +146,11 @@ def docs(session):
     """Build the docs for this library."""
 
     session.install("-e", ".[pandas,storage]")
-    session.install("sphinx", "alabaster", "recommonmark")
+    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
 
     shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
     session.run(
         "sphinx-build",
-        "-W",  # warnings as errors
         "-T",  # show full traceback on exception
         "-N",  # no colors
         "-b",
diff --git a/synth.py b/synth.py
index da66864c..498fdba3 100644
--- a/synth.py
+++ b/synth.py
@@ -105,14 +105,6 @@
     flags=re.DOTALL,
 )
 
-# Replace docstring with no summary line
-s.replace(
-    "google/cloud/**/io_pb2.py",
-    r"""__doc__ = \"\"\"-  For Translation: CSV file ``translation\.csv``, with each """,
-    r'''__doc__ = """
--  For Translation: CSV file ``translation.csv``, with each ''',
-    flags=re.DOTALL,
-)
 
 s.replace("google/cloud/**/io_pb2.py", r":raw-latex:`\\t `", r"\\\\t")
 
@@ -131,15 +123,42 @@
 # Make \n visible in JSONL samples
 s.replace("google/cloud/**/io_pb2.py", r"\}\\n", r"}\\\\n")
 
+# properly escape emphasis
+s.replace("google/cloud/**/*.py",
+"""image_classification_dataset_metadata:\*""",
+"""``image_classification_dataset_metadata``""")
+
+s.replace("google/cloud/**/*.py",
+"""video_classification_model_metadata:\*""",
+"""``video_classification_model_metadata:*``""")
+
+# Escape '_' at the end of the line in pb2 docstrings
+s.replace(
+"google/cloud/**/*_pb2.py",
+"""\_$""",
+"""\_""",
+)
 # ----------------------------------------------------------------------------
 # Add templated files
 # ----------------------------------------------------------------------------
 templated_files = common.py_library(
-    unit_cov_level=82, cov_level=83, system_test_dependencies=["test_utils"]
+    unit_cov_level=82, cov_level=83
 )
 
 s.move(templated_files)
 
+# TODO(busunkim): Use latest sphinx after microgenerator transition
+s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"')
+# TODO(busunkim): Remove after microgenerator transition.
+# This is being added to AutoML because the proto comments are long and
+# regex replaces are a brittle temporary solution. 
+s.replace(
+"noxfile.py", 
+""""-W",  # warnings as errors
+\s+"-T",  \# show full traceback on exception""",
+""""-T",  # show full traceback on exception""")
+
+
 # install with extras (pandas, storage)
 s.replace(
     "noxfile.py",

From a5b05daebc1bc2e1917fea634491a97fa73a3a9a Mon Sep 17 00:00:00 2001
From: Bu Sun Kim 
Date: Fri, 26 Jun 2020 04:47:33 +0000
Subject: [PATCH 9/9] chore: remove testutils local install

---
 noxfile.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/noxfile.py b/noxfile.py
index e82d68dc..512fe04a 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -118,7 +118,6 @@ def system(session):
     session.install(
         "mock", "pytest", "google-cloud-testutils",
     )
-    session.install("-e", "test_utils")
     session.install("-e", ".[pandas,storage]")
 
     # Run py.test against the system tests.